Skip to content

Commit

Permalink
fix: handle publishing of a non-latest rev and unpublishing of non-latest one (#4279)
Browse files Browse the repository at this point in the history
  • Loading branch information
adrians5j authored Sep 19, 2024
1 parent 1d23eb7 commit fa945f2
Show file tree
Hide file tree
Showing 3 changed files with 568 additions and 202 deletions.
337 changes: 193 additions & 144 deletions packages/api-headless-cms-ddb-es/src/operations/entry/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1208,14 +1208,6 @@ export const createEntriesStorageOperations = (

const { entry, storageEntry } = transformer.transformEntryKeys();

/**
* We need currently published entry to check if need to remove it.
*/
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});

const revisionKeys = {
PK: createPartitionKey({
id: entry.id,
Expand Down Expand Up @@ -1259,175 +1251,158 @@ export const createEntriesStorageOperations = (
);
}

const items = [
entity.putBatch({
...storageEntry,
...revisionKeys,
TYPE: createRecordType()
})
];
const esItems: BatchWriteItem[] = [];
if (!latestEsEntry) {
throw new WebinyError(
`Could not publish entry. Could not load latest ("L") record (ES table).`,
"PUBLISH_ERROR",
{ entry }
);
}

const { index: esIndex } = configurations.es({
model
/**
* We need the latest entry to check if it needs to be updated as well in the Elasticsearch.
*/
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [entry.id]
});

if (publishedStorageEntry && publishedStorageEntry.id !== entry.id) {
/**
* If there is a `published` entry already, we need to set it to `unpublished`. We need to
* execute two updates: update the previously published entry's status and the published entry record.
* DynamoDB does not support `batchUpdate` - so here we load the previously published
* entry's data to update its status within a batch operation. If, hopefully,
* they introduce a true update batch operation, remove this `read` call.
*/
const [previouslyPublishedEntry] = await dataLoaders.getRevisionById({
model,
ids: [publishedStorageEntry.id]
});
items.push(
/**
* Update currently published entry (unpublish it)
*/
entity.putBatch({
...previouslyPublishedEntry,
status: CONTENT_ENTRY_STATUS.UNPUBLISHED,
TYPE: createRecordType(),
PK: createPartitionKey(publishedStorageEntry),
SK: createRevisionSortKey(publishedStorageEntry)
})
if (!latestStorageEntry) {
throw new WebinyError(
`Could not publish entry. Could not load latest ("L") record.`,
"PUBLISH_ERROR",
{ entry }
);
}

/**
* Update the helper item in DB with the new published entry
* We need currently published entry to check if need to remove it.
*/
items.push(
const [publishedStorageEntry] = await dataLoaders.getPublishedRevisionByEntryId({
model,
ids: [entry.id]
});

// 1. Update REV# and P records with new data.
const items = [
entity.putBatch({
...storageEntry,
...revisionKeys,
TYPE: createRecordType()
}),
entity.putBatch({
...storageEntry,
...publishedKeys,
TYPE: createPublishedRecordType()
})
);
];
const esItems: BatchWriteItem[] = [];

/**
* We need the latest entry to check if it needs to be updated as well in the Elasticsearch.
*/
const [latestStorageEntry] = await dataLoaders.getLatestRevisionByEntryId({
model,
ids: [entry.id]
const { index: esIndex } = configurations.es({
model
});

if (latestStorageEntry?.id === entry.id) {
// 2. When it comes to the latest record, we need to perform a couple of different
// updates, based on whether the entry being published is the latest revision or not.
const publishedRevisionId = publishedStorageEntry?.id;
const publishingLatestRevision = latestStorageEntry?.id === entry.id;

if (publishingLatestRevision) {
// 2.1 If we're publishing the latest revision, we first need to update the L record.
items.push(
entity.putBatch({
...storageEntry,
...latestKeys
})
);
}

if (latestEsEntry) {
const publishingLatestRevision = latestStorageEntry?.id === entry.id;

/**
* Need to decompress the data from Elasticsearch DynamoDB table.
*
* No need to transform it for the storage because it was fetched
* directly from the Elasticsearch table, where it sits transformed.
*/
const latestEsEntryDataDecompressed = (await decompress(
plugins,
latestEsEntry.data
)) as CmsIndexEntry;

if (publishingLatestRevision) {
const updatedMetaFields = pickEntryMetaFields(entry);

const latestTransformer = createTransformer({
plugins,
model,
transformedToIndex: {
...latestEsEntryDataDecompressed,
status: CONTENT_ENTRY_STATUS.PUBLISHED,
locked: true,
...updatedMetaFields
}
});

esItems.push(
esEntity.putBatch({
index: esIndex,
PK: createPartitionKey(latestEsEntryDataDecompressed),
SK: createLatestSortKey(),
data: await latestTransformer.getElasticsearchLatestEntryData()
})
);
} else {
const updatedEntryLevelMetaFields = pickEntryMetaFields(
entry,
isEntryLevelEntryMetaField
);

const updatedLatestStorageEntry = {
...latestStorageEntry,
...latestKeys,
...updatedEntryLevelMetaFields
};

/**
* First we update the regular DynamoDB table. Two updates are needed:
* - one for the actual revision record
* - one for the latest record
*/
items.push(
entity.putBatch({
...updatedLatestStorageEntry,
PK: createPartitionKey({
id: latestStorageEntry.id,
locale: model.locale,
tenant: model.tenant
}),
SK: createRevisionSortKey(latestStorageEntry),
TYPE: createRecordType()
})
);
// 2.2 Additionally, if we have a previously published entry, we need to mark it as unpublished.
// Note that we need to take re-publishing into account (same published revision being
// published again), in which case the below code does not apply. This is because the
// required updates were already applied above.
if (publishedStorageEntry) {
const isRepublishing = publishedStorageEntry.id === entry.id;
if (!isRepublishing) {
items.push(
/**
* Update currently published entry (unpublish it)
*/
entity.putBatch({
...publishedStorageEntry,
status: CONTENT_ENTRY_STATUS.UNPUBLISHED,
TYPE: createRecordType(),
PK: createPartitionKey(publishedStorageEntry),
SK: createRevisionSortKey(publishedStorageEntry)
})
);
}
}
} else {
// 2.3 If the published revision is not the latest one, the situation is a bit
// more complex. We first need to update the L and REV# records with the new
// values of *only entry-level* meta fields.
const updatedEntryLevelMetaFields = pickEntryMetaFields(
entry,
isEntryLevelEntryMetaField
);

items.push(
entity.putBatch({
...updatedLatestStorageEntry,
TYPE: createLatestRecordType()
})
);
// 2.4 Update L record. Apart from updating the entry-level meta fields, we also need
// to change the status from "published" to "unpublished" (if the status is set to "published").
let latestRevisionStatus = latestStorageEntry.status;
if (latestRevisionStatus === CONTENT_ENTRY_STATUS.PUBLISHED) {
latestRevisionStatus = CONTENT_ENTRY_STATUS.UNPUBLISHED;
}

/**
* Update the Elasticsearch table to propagate changes to the Elasticsearch.
*/
const latestEsEntry = await getClean<ElasticsearchDbRecord>({
entity: esEntity,
keys: latestKeys
});
const latestStorageEntryFields = {
...latestStorageEntry,
...updatedEntryLevelMetaFields,
status: latestRevisionStatus
};

if (latestEsEntry) {
const latestEsEntryDataDecompressed = (await decompress(
plugins,
latestEsEntry.data
)) as CmsIndexEntry;
items.push(
entity.putBatch({
...latestStorageEntryFields,
PK: createPartitionKey(latestStorageEntry),
SK: createLatestSortKey(),
TYPE: createLatestRecordType()
})
);

const updatedLatestEntry = await compress(plugins, {
...latestEsEntryDataDecompressed,
...updatedEntryLevelMetaFields
});
// 2.5 Update REV# record.
items.push(
entity.putBatch({
...latestStorageEntryFields,
PK: createPartitionKey(latestStorageEntry),
SK: createRevisionSortKey(latestStorageEntry),
TYPE: createRecordType()
})
);

esItems.push(
esEntity.putBatch({
...latestKeys,
index: esIndex,
data: updatedLatestEntry
// 2.6 Additionally, if we have a previously published entry, we need to mark it as unpublished.
// Note that we need to take re-publishing into account (same published revision being
// published again), in which case the below code does not apply. This is because the
// required updates were already applied above.
if (publishedStorageEntry) {
const isRepublishing = publishedStorageEntry.id === entry.id;
const publishedRevisionDifferentFromLatest =
publishedRevisionId !== latestStorageEntry.id;

if (!isRepublishing && publishedRevisionDifferentFromLatest) {
items.push(
entity.putBatch({
...publishedStorageEntry,
PK: createPartitionKey(publishedStorageEntry),
SK: createRevisionSortKey(publishedStorageEntry),
TYPE: createRecordType(),
status: CONTENT_ENTRY_STATUS.UNPUBLISHED
})
);
}
}
}

// 3. Update records in ES -> DDB table.

/**
* Update the published revision entry in ES.
*/
Expand All @@ -1440,6 +1415,80 @@ export const createEntriesStorageOperations = (
})
);

/**
* Need to decompress the data from Elasticsearch DynamoDB table.
*
* No need to transform it for the storage because it was fetched
* directly from the Elasticsearch table, where it sits transformed.
*/
const latestEsEntryDataDecompressed = (await decompress(
plugins,
latestEsEntry.data
)) as CmsIndexEntry;

if (publishingLatestRevision) {
const updatedMetaFields = pickEntryMetaFields(entry);

const latestTransformer = createTransformer({
plugins,
model,
transformedToIndex: {
...latestEsEntryDataDecompressed,
status: CONTENT_ENTRY_STATUS.PUBLISHED,
locked: true,
...updatedMetaFields
}
});

esItems.push(
esEntity.putBatch({
index: esIndex,
PK: createPartitionKey(latestEsEntryDataDecompressed),
SK: createLatestSortKey(),
data: await latestTransformer.getElasticsearchLatestEntryData()
})
);
} else {
const updatedEntryLevelMetaFields = pickEntryMetaFields(
entry,
isEntryLevelEntryMetaField
);

/**
* Update the Elasticsearch table to propagate changes to the Elasticsearch.
*/
const latestEsEntry = await getClean<ElasticsearchDbRecord>({
entity: esEntity,
keys: latestKeys
});

if (latestEsEntry) {
const latestEsEntryDataDecompressed = (await decompress(
plugins,
latestEsEntry.data
)) as CmsIndexEntry;

let latestRevisionStatus = latestEsEntryDataDecompressed.status;
if (latestRevisionStatus === CONTENT_ENTRY_STATUS.PUBLISHED) {
latestRevisionStatus = CONTENT_ENTRY_STATUS.UNPUBLISHED;
}

const updatedLatestEntry = await compress(plugins, {
...latestEsEntryDataDecompressed,
...updatedEntryLevelMetaFields,
status: latestRevisionStatus
});

esItems.push(
esEntity.putBatch({
...latestKeys,
index: esIndex,
data: updatedLatestEntry
})
);
}
}

/**
* Finally, execute regular table batch.
*/
Expand Down
Loading

0 comments on commit fa945f2

Please sign in to comment.