Commit 06b93b7
update logic in dozer/getSnapshot
alvrs committed Aug 1, 2024
1 parent 8b30a70 commit 06b93b7
Showing 1 changed file with 42 additions and 38 deletions.
packages/store-sync/src/dozer/getSnapshot.ts
@@ -25,8 +25,6 @@ export async function getSnapshot({
   startBlock = 0n,
   chainId,
 }: GetSnapshotArgs): Promise<GetSnapshotResult> {
-  const initialBlockLogs: StorageAdapterBlock = { blockNumber: startBlock, logs: [] };
-
   // We execute the list of provided SQL queries for hydration. For performance
   // reasons the queries are not executed against a fixed block height, but against
   // the latest state. We therefore pass the min block number of all query results
@@ -38,47 +36,53 @@

   const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : [];

-  // Execute individual SQL queries as separate requests to parallelize on the backend.
-  // Each individual request is expected to be executed against the same db state so it
-  // can't be parallelized.
-  const dozerTables = (
-    await Promise.all(sqlFilters.map((filter) => fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] })))
-  ).filter(isDefined);
+  const fetchLogs = async (): Promise<StorageAdapterBlock | undefined> => {
+    // Fetch the tables without SQL filter from the snapshot logs API for better performance.
+    const logsFilters =
+      filters &&
+      filters
+        .filter((filter) => !("sql" in filter))
+        .map((filter) => {
+          const { table, key0, key1 } = filter as LogFilter;
+          return { tableId: table.tableId, key0, key1 } as LegacyLogFilter;
+        });

-  if (dozerTables.length > 0) {
-    // Use the minimum block number of all query results as the block number to start syncing from.
-    initialBlockLogs.blockNumber = bigIntMin(...dozerTables.map((result) => result.blockHeight));
-    initialBlockLogs.logs = dozerTables.flatMap(({ result: [{ table, records }] }) =>
-      records.map((record) => recordToLog({ table, record, address: storeAddress })),
-    );
-  }
+    if (logsFilters && logsFilters.length === 0) {
+      return undefined;
+    }

-  // Fetch the tables without SQL filter from the snapshot logs API for better performance.
-  const logsFilters =
-    filters &&
-    filters
-      .filter((filter) => !("sql" in filter))
-      .map((filter) => {
-        const { table, key0, key1 } = filter as LogFilter;
-        return { tableId: table.tableId, key0, key1 } as LegacyLogFilter;
-      });
+    return getSnapshotLogs({
+      chainId,
+      address: storeAddress,
+      filters: logsFilters,
+      indexerUrl: dozerUrl,
+    });
+  };

-  const logs =
-    // If no filters are provided, the entire state is fetched
-    !logsFilters || logsFilters.length > 0
-      ? await getSnapshotLogs({
-          chainId,
-          address: storeAddress,
-          filters: logsFilters,
-          indexerUrl: dozerUrl,
-        })
-      : undefined;
+  const fetchSql = (): Promise<StorageAdapterBlock | undefined>[] => {
+    return sqlFilters.map(async (filter) => {
+      const result = await fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] });
+      return (
+        result && {
+          blockNumber: result.blockHeight,
+          logs: result.result.flatMap(({ table, records }) =>
+            records.map((record) => recordToLog({ table, record, address: storeAddress })),
+          ),
+        }
+      );
+    });
+  };

+  // Execute individual SQL queries as separate requests to parallelize on the backend.
+  // Each individual request is expected to be executed against the same db state so it
+  // can't be parallelized.
+  const results = (await Promise.all([fetchLogs(), ...fetchSql()])).filter(isDefined);
+
   // The block number passed in the overall result will be the min of all queries and the logs.
-  if (logs) {
-    initialBlockLogs.blockNumber = bigIntMin(initialBlockLogs.blockNumber, logs.blockNumber);
-    initialBlockLogs.logs = [...initialBlockLogs.logs, ...logs.logs];
-  }
+  const initialBlockLogs = {
+    blockNumber: results.length > 0 ? bigIntMin(...results.map((result) => result.blockNumber)) : startBlock,
+    logs: results.flatMap((result) => result.logs),
+  };

   return { initialBlockLogs };
 }
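
A note on the filter split this diff introduces: logsFilters is deliberately tri-state. undefined means no filters were passed at all, so the logs request fetches the entire state; an empty array means only SQL filters were passed, so fetchLogs skips the logs request and returns undefined. A minimal, self-contained sketch of that logic, using simplified stand-ins for the real TableQuery and LogFilter types from store-sync:

type TableQuery = { table: { tableId: string }; sql: string };
type LogFilter = { table: { tableId: string }; key0?: string; key1?: string };
type Filter = TableQuery | LogFilter;

function splitFilters(filters?: Filter[]) {
  // Filters carrying a `sql` field go to the SQL API...
  const sqlFilters = filters ? (filters.filter((f) => "sql" in f) as TableQuery[]) : [];
  // ...everything else becomes a legacy log filter for the snapshot logs API.
  // `undefined` (no filters at all) means "fetch the entire state";
  // an empty array means "only SQL filters were given, skip the logs request".
  const logsFilters =
    filters &&
    filters
      .filter((f) => !("sql" in f))
      .map((f) => {
        const { table, key0, key1 } = f as LogFilter;
        return { tableId: table.tableId, key0, key1 };
      });
  return { sqlFilters, logsFilters };
}

// No filters: logsFilters is undefined, so the logs request fetches everything.
console.log(splitFilters(undefined).logsFilters); // undefined
// Only a SQL filter: logsFilters is [], so fetchLogs returns early with undefined.
console.log(splitFilters([{ table: { tableId: "0x01" }, sql: "select ..." }]).logsFilters); // []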

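The aggregation at the end is the core of the change: the logs request and each SQL request may be answered at a different latest block height, so the combined snapshot must resume syncing from the minimum height across all results, falling back to startBlock when nothing was fetched. A runnable sketch of that pattern; StorageAdapterBlock is simplified here to hold plain strings, and bigIntMin is written inline (the real code imports a utility of the same name):

type StorageAdapterBlock = { blockNumber: bigint; logs: string[] };

// Assumed helper mirroring the imported bigIntMin utility.
const bigIntMin = (...values: bigint[]): bigint =>
  values.reduce((min, value) => (value < min ? value : min));

async function combineSnapshotResults(
  startBlock: bigint,
  fetches: Promise<StorageAdapterBlock | undefined>[],
): Promise<StorageAdapterBlock> {
  // Run the logs request and all SQL requests in parallel, dropping empty results.
  const results = (await Promise.all(fetches)).filter(
    (result): result is StorageAdapterBlock => result !== undefined,
  );
  return {
    // Each request may be served at a different "latest" block height, so
    // syncing has to resume from the lowest height among all results.
    blockNumber: results.length > 0 ? bigIntMin(...results.map((r) => r.blockNumber)) : startBlock,
    logs: results.flatMap((r) => r.logs),
  };
}

// Example: responses at heights 120n and 118n; the combined snapshot resumes at 118n.
combineSnapshotResults(0n, [
  Promise.resolve({ blockNumber: 120n, logs: ["logA"] }),
  Promise.resolve({ blockNumber: 118n, logs: ["logB"] }),
  Promise.resolve(undefined),
]).then((block) => console.log(block.blockNumber)); // 118n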