fix: optimizations to sqlite, fixed create domain API bug, use SQLite RETURNING to prevent double queries, added transactions, sync wildduck changes, fixed binary string issue due to getStream encoding option, remove bulkWrite
titanism committed Jun 26, 2024
1 parent efb0255 commit 5c53b88
Showing 37 changed files with 741 additions and 1,080 deletions.
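
The "fixed binary string issue due to getStream encoding option" change is not visible in the hunks below; the class of bug it refers to looks roughly like this (a minimal sketch, not code from this commit, assuming get-stream v6 and its buffer() helper):

// collecting a binary stream as a 'binary'/latin1 string and re-encoding it
// later corrupts bytes above 0x7f; collecting a Buffer does not
const getStream = require('get-stream');
const intoStream = require('into-stream');

async function demo() {
  const original = Buffer.from([0xff, 0x00, 0x89, 0x50]); // arbitrary binary bytes
  // problematic: string round trip through an encoding option
  const asString = await getStream(intoStream(original), { encoding: 'binary' });
  const corrupted = Buffer.from(asString); // default utf-8 re-encoding mangles 0xff
  // safe: keep the payload as a Buffer end to end
  const asBuffer = await getStream.buffer(intoStream(original));
  console.log(original.equals(corrupted), original.equals(asBuffer)); // false true
}

demo();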
11 changes: 5 additions & 6 deletions ansible/playbooks/security.yml
@@ -536,16 +536,15 @@
# https://github.com/Oefenweb/ansible-dns
- role: dns
dns_nameservers:
# google (temporary due to cloudflare ESERVFAIL and EAI_AGAIN errors)
# cloudflare and google (ipv4/ipv6 and rotated)
- 1.1.1.1
- 2606:4700:4700::1111
- 8.8.8.8
- 8.8.4.4
- 2001:4860:4860::8888
- 2001:4860:4860::8844
# cloudflare
- 1.1.1.1
- 1.0.0.1
- 2606:4700:4700::1111
- 2606:4700:4700::1001
- 8.8.4.4
- 2001:4860:4860::8844

# https://github.com/Oefenweb/ansible-ntp
- role: ntp
8 changes: 7 additions & 1 deletion app/controllers/web/my-account/create-domain.js
@@ -79,7 +79,13 @@ async function createDomain(ctx, next) {
// if there was a payment required error before creating the domain
// it indicates that the domain was most likely a malicious extension
// redirect to /my-account/domains/new?domain=$domain&plan=enhanced_protection
if (err && err.isBoom && err.output && err.output.statusCode === 402) {
if (
!ctx.api &&
err &&
err.isBoom &&
err.output &&
err.output.statusCode === 402
) {
const redirectTo = ctx.state.l(
`/my-account/billing/upgrade?plan=enhanced_protection&domain=${ctx.request.body.domain}`
);
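
The new !ctx.api guard appears to be the "fixed create domain API bug" part of the commit message: only browser sessions should be redirected on a 402, while API clients should receive the error itself. A minimal sketch of the intent (hypothetical handler, not this repository's code):

// web UI: redirect to the billing upgrade flow on a Boom 402
// REST API: rethrow so the error is serialized as a normal error response
async function handlePaymentRequired(ctx, err) {
  if (ctx.api) throw err;
  const redirectTo = ctx.state.l(
    `/my-account/billing/upgrade?plan=enhanced_protection&domain=${ctx.request.body.domain}`
  );
  ctx.redirect(redirectTo);
}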
23 changes: 10 additions & 13 deletions app/models/aliases.js
@@ -744,20 +744,17 @@ Aliases.statics.isOverQuota = async function (

// cache the values of storageUsed and isOverQuota for 5-10s
if (size === 0)
client
.set(
`alias_quota:${alias.id}`,
JSON.stringify({
storageUsed,
maxQuotaPerAlias
}),
'PX',
10000
)
.then()
.catch((err) => logger.fatal(err));
await client.set(
`alias_quota:${alias.id}`,
JSON.stringify({
storageUsed,
maxQuotaPerAlias
}),
'PX',
10000
);

return { storageUsed, isOverQuota };
return { storageUsed, isOverQuota, maxQuotaPerAlias };
};

Aliases.methods.createToken = async function (
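
The Redis SET above caches storageUsed and maxQuotaPerAlias for 10 seconds (PX 10000) and is now awaited instead of fire-and-forget. The companion read path is not part of this hunk; a sketch of how such a cache entry is typically consumed (assumed shape, ioredis-style client):

// return the cached quota values, or null on a miss or unparsable entry
async function getCachedAliasQuota(client, alias) {
  const cached = await client.get(`alias_quota:${alias.id}`);
  if (!cached) return null;
  try {
    const { storageUsed, maxQuotaPerAlias } = JSON.parse(cached);
    return { storageUsed, maxQuotaPerAlias };
  } catch {
    return null;
  }
}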
39 changes: 7 additions & 32 deletions app/models/threads.js
@@ -29,7 +29,7 @@ mongoose.Error.messages = require('@ladjs/mongoose-error-messages');
const {
dummyProofModel,
dummySchemaOptions,
convertResult,
syncConvertResult,
sqliteVirtualDB
} = require('#helpers/mongoose-to-sqlite');

@@ -126,7 +126,7 @@ async function getThreadId(instance, session, subject, mimeTree) {
}

if (thread) {
thread = await convertResult(this, thread);
thread = syncConvertResult(this, thread);

for (const id of referenceIds) {
thread.ids.push(id);
@@ -145,37 +145,12 @@ async function getThreadId(instance, session, subject, mimeTree) {
},
modifier: {
ids: safeStringify(thread.ids)
}
},
returning: ['*']
});

// result of this will be like:
// `{ changes: 1, lastInsertRowid: 11 }`

// use websockets if readonly
if (session.db.readonly) {
await instance.wsp.request({
action: 'stmt',
session: { user: session.user },
stmt: [
['prepare', sql.query],
['run', sql.values]
]
});
} else {
session.db.prepare(sql.query).run(sql.values);
}
}

{
const sql = builder.build({
type: 'select',
table: 'Threads',
condition: {
_id: thread._id.toString()
}
});

if (instance.wsp) {
thread = await instance.wsp.request({
action: 'stmt',
session: { user: session.user },
Expand All @@ -187,10 +162,10 @@ async function getThreadId(instance, session, subject, mimeTree) {
} else {
thread = session.db.prepare(sql.query).get(sql.values);
}

if (!thread) throw new TypeError('Thread does not exist');
thread = await convertResult(this, thread);
}

if (!thread) throw new TypeError('Thread does not exist');
thread = syncConvertResult(this, thread);
}
}
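
Adding returning: ['*'] to the json-sql update is what lets the old follow-up SELECT block be deleted: the UPDATE itself hands back the modified row, halving the number of statements. A minimal standalone sketch of the same pattern against better-sqlite3 (illustrative table and columns, not this project's schema):

// UPDATE ... RETURNING gives back the updated row in one statement,
// so no second SELECT is needed to re-read it
const Database = require('better-sqlite3');
const db = new Database(':memory:');
db.exec('CREATE TABLE Threads (_id TEXT PRIMARY KEY, ids TEXT)');
db.prepare('INSERT INTO Threads (_id, ids) VALUES (?, ?)').run('t1', '[]');

const row = db
  .prepare('UPDATE Threads SET ids = ? WHERE _id = ? RETURNING *')
  .get('["a","b"]', 't1');
console.log(row); // { _id: 't1', ids: '["a","b"]' }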

190 changes: 121 additions & 69 deletions helpers/attachment-storage.js
@@ -19,6 +19,7 @@ const intoStream = require('into-stream');
const pify = require('pify');
const revHash = require('rev-hash');
const _ = require('lodash');
const { Builder } = require('json-sql');

const WildDuckAttachmentStorage = require('wildduck/lib/attachment-storage');

@@ -30,6 +31,59 @@ const WildDuckAttachmentStorage = require('wildduck/lib/attachment-storage');

const Attachments = require('#models/attachments');
const logger = require('#helpers/logger');
const { acquireLock, releaseLock } = require('#helpers/lock');
const { syncConvertResult } = require('#helpers/mongoose-to-sqlite');

const builder = new Builder();

function updateAttachments(attachmentIds, magic, session) {
const sql = builder.build({
type: 'update',
table: 'Attachments',
condition: {
hash: { $in: attachmentIds }
},
modifier: {
$inc: {
counter: -1,
magic: -magic
},
$set: {
counterUpdated: new Date().toISOString()
}
},
returning: ['_id', 'counter', 'magic']
});

// update attachment data
const attachments = session.db.prepare(sql.query).all(sql.values);

// delete attachments if necessary
const $in = [];
for (const attachment of attachments) {
if (attachment.counter === 0 && attachment.magic === 0) {
$in.push(attachment._id);
}
}

//
// NOTE: wildduck has this disabled (as they have a cleanup job that runs after a duration)
// (e.g. if a user quickly re-adds the attachment, it would save the re-creation by hash lookup)
// (but to keep things simple for now we're just going to delete it)
//
if ($in.length > 0) {
const sql = builder.build({
type: 'remove',
table: 'Attachments',
condition: {
_id: {
$in
}
}
});
session.db.prepare(sql.query).run(sql.values);
}
}

class AttachmentStorage {
constructor(options) {
@@ -70,51 +124,54 @@ class AttachmentStorage {
};
}

async create(instance, session, attachment) {
const hex = await this.calculateHashPromise(attachment.body);
attachment.hash = revHash(Buffer.from(hex, 'hex'));
attachment.counter = 1;
attachment.counterUpdated = new Date();
attachment.size = attachment.body.length;
if (
Number.isNaN(attachment.magic) ||
typeof attachment.magic !== 'number'
) {
async create(instance, session, node) {
const hex = await this.calculateHashPromise(node.body);
node.hash = revHash(Buffer.from(hex, 'hex'));
node.counter = 1;
node.counterUpdated = new Date();
node.size = node.body.length;
if (Number.isNaN(node.magic) || typeof node.magic !== 'number') {
const err = new TypeError('Invalid magic');
err.attachment = attachment;
err.node = node;
throw err;
}

const result = await Attachments.findOneAndUpdate(
instance,
session,
{
hash: attachment.hash
const sql = builder.build({
type: 'update',
table: 'Attachments',
condition: {
hash: node.hash
},
{
modifier: {
$inc: {
counter: 1,
magic: attachment.magic
magic: node.magic
},
$set: {
counterUpdated: new Date()
counterUpdated: new Date().toISOString()
}
},
{
returnDocument: 'after'
}
);
returning: ['*']
});

if (result) return result;
const result = session.db.prepare(sql.query).get(sql.values);
if (result) return syncConvertResult(Attachments, result);

// virtual helper for locking if we lock in advance
// attachment.lock = lock
// TODO: finish this INSERT statement with validation of a field returned
// const attachment = new Attachments(node);
// await attachment.validate();
// {
// const sql = builder.build({
// type: 'insert',
// });
// // TODO: finish this
// }

// virtual helper
attachment.instance = instance;
attachment.session = session;
node.instance = instance;
node.session = session;

return Attachments.create(attachment);
return Attachments.create(node);
}

//
@@ -143,55 +200,50 @@ class AttachmentStorage {
}

// eslint-disable-next-line max-params
async deleteMany(instance, session, attachmentIds, magic, lock = false) {
async deleteMany(
instance,
session,
attachmentIds,
magic,
lock = false,
isTransaction = true
) {
if (Number.isNaN(magic) || typeof magic !== 'number') {
const err = new TypeError('Invalid magic');
err.attachmentIds = attachmentIds;
err.magic = magic;
throw err;
}

const attachments = await Attachments.updateMany(
instance,
session,
{
hash: { $in: attachmentIds }
},
{
$inc: {
counter: -1,
magic: -magic
},
$set: {
counterUpdated: new Date()
}
},
{
lock,
returnDocument: 'after'
if (instance.wsp) throw new TypeError('WSP instance invalid');

let newLock;
if (!lock) newLock = await acquireLock(this, session.db);

let err;

try {
if (isTransaction) {
updateAttachments(attachmentIds, magic, session);
} else {
session.db.transaction(() => {
updateAttachments(attachmentIds, magic, session);
});
}
);
} catch (_err) {
err = _err;
}

//
// NOTE: wildduck has this disabled (as they have a cleanup job that runs after a duration)
// (e.g. if a user quickly re-adds the attachment, it would save the re-creation by hash lookup)
// (but to keep things simple for now we're just going to delete it)
//
await Promise.all(
attachments.map(async (attachment) => {
try {
if (attachment.counter === 0 && attachment.magic === 0)
await Attachments.deleteOne(
instance,
session,
{ _id: attachment._id },
{ lock }
);
} catch (err) {
logger.fatal(err, { attachment });
}
})
);
// release lock
if (newLock?.success) {
try {
await releaseLock(instance, session.db, newLock);
} catch (err) {
logger.fatal(err, { attachmentIds, magic });
}
}

if (err) throw err;
}
}
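
deleteMany() now takes a lock and runs the counter updates through session.db.transaction() (per the "added transactions" line in the commit message). For reference, better-sqlite3's transaction API works like this (minimal standalone sketch, illustrative schema, not this project's code); db.transaction(fn) returns a wrapper function, and the statements only execute when that wrapper is called:

const Database = require('better-sqlite3');
const db = new Database(':memory:');
db.exec(
  'CREATE TABLE Attachments (hash TEXT PRIMARY KEY, counter INTEGER, magic INTEGER)'
);
db.prepare('INSERT INTO Attachments VALUES (?, ?, ?)').run('abc', 2, 100);

// decrement counters and delete orphaned rows atomically
const decrement = db.transaction((hashes, magic) => {
  const update = db.prepare(
    'UPDATE Attachments SET counter = counter - 1, magic = magic - ? WHERE hash = ? RETURNING *'
  );
  for (const hash of hashes) {
    const row = update.get(magic, hash);
    if (row && row.counter === 0 && row.magic === 0)
      db.prepare('DELETE FROM Attachments WHERE hash = ?').run(hash);
  }
});

decrement(['abc'], 50); // either every statement commits or none do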

2 changes: 1 addition & 1 deletion helpers/create-tangerine.js
@@ -59,7 +59,7 @@ function createTangerine(
// speeds up tests x2 if any DNS errors detected
timeout: env.NODE_ENV === 'production' ? 10000 : 5000,
tries: env.NODE_ENV === 'production' ? 4 : 2,
servers: new Set(['8.8.8.8', '8.8.4.4', '1.1.1.1', '1.0.0.1']),
servers: new Set(['1.1.1.1', '8.8.8.8', '1.0.0.1', '8.8.4.4']),
setCacheArgs(key, result) {
return ['PX', Math.round(result.ttl * 1000)];
}
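
The servers Set now interleaves Cloudflare and Google resolvers so retries rotate across providers instead of exhausting one. A sketch of consuming the resolver this helper builds (assumes Tangerine mirrors the node:dns promises Resolver interface; the createTangerine call signature here is illustrative):

// resolve MX records through the rotated resolver list above
const createTangerine = require('#helpers/create-tangerine');

async function lookupMx(client, domain) {
  const resolver = createTangerine(client);
  // a failure against 1.1.1.1 is retried against the next servers in the list
  return resolver.resolveMx(domain);
}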