diff --git a/.changeset/ninety-carpets-watch.md b/.changeset/ninety-carpets-watch.md
index 618d280a153..9b0c215837a 100644
--- a/.changeset/ninety-carpets-watch.md
+++ b/.changeset/ninety-carpets-watch.md
@@ -1,5 +1,5 @@
 ---
-"@internal/benchmarks": minor
+"@internal/benchmarks": patch
 ---
 
 chore: run benchmarking utility in devnet environment
diff --git a/.github/workflows/bench-testnet.yaml b/.github/workflows/bench-devnet.yaml
similarity index 100%
rename from .github/workflows/bench-testnet.yaml
rename to .github/workflows/bench-devnet.yaml
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
index 96f6a93255b..075d7789e5b 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -1,27 +1,28 @@
-name: Benchmarks
-on:
-  pull_request:
-    branches:
-      - master
-  push:
-    branches-ignore:
-      - master
+# Uncomment this when we want to run benchmarks on PRs
+# name: Benchmarks
+# on:
+#   pull_request:
+#     branches:
+#       - master
+#   push:
+#     branches-ignore:
+#       - master
 
-jobs:
-  benchmarks:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
+# jobs:
+#   benchmarks:
+#     runs-on: ubuntu-latest
+#     steps:
+#       - name: Checkout
+#         uses: actions/checkout@v4
 
-      - name: CI Setup
-        uses: ./.github/actions/test-setup
+#       - name: CI Setup
+#         uses: ./.github/actions/test-setup
 
-      - name: Pretest
-        run: pnpm pretest
+#       - name: Pretest
+#         run: pnpm pretest
 
-      - name: Run Node benchmarks
-        uses: CodSpeedHQ/action@v3
-        with:
-          run: pnpm bench:node
-          token: ${{ secrets.CODSPEED_TOKEN }}
+#       - name: Run Node benchmarks
+#         uses: CodSpeedHQ/action@v3
+#         with:
+#           run: pnpm bench:node
+#           token: ${{ secrets.CODSPEED_TOKEN }}
diff --git a/internal/benchmarks/src/contract-interaction.bench.ts b/internal/benchmarks/src/contract-interaction.bench.ts
index 774b77b9d14..e0c00f6801e 100644
--- a/internal/benchmarks/src/contract-interaction.bench.ts
+++ b/internal/benchmarks/src/contract-interaction.bench.ts
@@ -5,7 +5,11 @@ import { launchTestNode, TestAssetId } from 'fuels/test-utils';
 import { bench } from 'vitest';
 
 import type { CounterContract, CallTestContract } from '../test/typegen/contracts';
-import { CounterContractFactory, CallTestContractFactory } from '../test/typegen/contracts';
+import {
+  CounterContractFactory,
+  CallTestContractFactory,
+  PythContractFactory,
+} from '../test/typegen/contracts';
 
 import { DEVNET_CONFIG } from './config';
 
 /**
@@ -44,38 +48,60 @@ describe('Contract Interaction Benchmarks', () => {
     });
   }
 
-  bench('should successfully execute a contract read function', async () => {
-    const tx = await contract.functions.get_count().call();
+  bench('should successfully execute a contract read function 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const tx = await contract.functions.get_count().call();
 
-    const { value } = await tx.waitForResult();
+      const { value } = await tx.waitForResult();
 
-    expect(JSON.stringify(value)).toEqual(JSON.stringify(bn(0)));
+      expect(JSON.stringify(value)).toEqual(JSON.stringify(bn(0)));
+    }
   });
 
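+  // NOTE: the benches in this file now repeat each operation 10 times per
+  // sample, presumably so that a single measurement amortizes per-call
+  // network variance against the shared devnet instead of timing one
+  // round trip.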
-  bench('should successfully execute a contract multi call', async () => {
-    const tx = await contract
-      .multiCall([contract.functions.increment_counter(100), contract.functions.get_count()])
-      .call();
+  bench('should successfully execute a contract multi call 10 times', async () => {
+    const initialValue = 100;
+    for (let i = 1; i < 11; i++) {
+      const tx = await contract
+        .multiCall([contract.functions.increment_counter(100), contract.functions.get_count()])
+        .call();
 
-    const { value } = await tx.waitForResult();
+      const { value } = await tx.waitForResult();
 
-    expect(JSON.stringify(value)).toEqual(JSON.stringify([bn(100), bn(100)]));
+      expect(JSON.stringify(value)).toEqual(
+        JSON.stringify([bn(initialValue * i), bn(initialValue * i)])
+      );
+    }
   });
 
-  bench('should successfully write to a contract', async () => {
-    const tx = await contract.functions.increment_counter(100).call();
-    await tx.waitForResult();
+  bench('should successfully write to a contract 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const tx = await contract.functions.increment_counter(100).call();
+      await tx.waitForResult();
+    }
   });
 
-  bench('should successfully execute a contract mint', async () => {
-    const tx = await callTestContract.functions.mint_coins(TestAssetId.A.value, bn(100)).call();
+  bench('should successfully execute a contract mint 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const tx = await callTestContract.functions.mint_coins(TestAssetId.A.value, bn(100)).call();
+      await tx.waitForResult();
+    }
+  });
+
+  bench('should successfully execute a contract deploy 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const factory = new CounterContractFactory(wallet);
+      const { waitForResult } = await factory.deploy();
+      const { contract: deployedContract } = await waitForResult();
 
-    await tx.waitForResult();
+      expect(deployedContract).toBeDefined();
+    }
   });
 
-  bench('should successfully execute a contract deploy', async () => {
-    const factory = new CounterContractFactory(wallet);
-    const { waitForResult } = await factory.deploy();
+  bench('should successfully execute a contract deploy as blobs', async () => {
+    const factory = new PythContractFactory(wallet);
+    const { waitForResult } = await factory.deployAsBlobTx({
+      chunkSizeMultiplier: 0.9,
+    });
     const { contract: deployedContract } = await waitForResult();
 
     expect(deployedContract).toBeDefined();
diff --git a/internal/benchmarks/src/cost-estimation.bench.ts b/internal/benchmarks/src/cost-estimation.bench.ts
index b34b63db5e2..0796b7249db 100644
--- a/internal/benchmarks/src/cost-estimation.bench.ts
+++ b/internal/benchmarks/src/cost-estimation.bench.ts
@@ -1,6 +1,6 @@
 /* eslint-disable import/no-extraneous-dependencies */
 
-import type { TransferParams, WalletUnlocked } from 'fuels';
+import type { TransferParams, WalletUnlocked, BytesLike } from 'fuels';
 import { Wallet, Provider, ScriptTransactionRequest } from 'fuels';
 import { launchTestNode, TestAssetId } from 'fuels/test-utils';
 import { bench } from 'vitest';
@@ -51,8 +51,17 @@ describe('Cost Estimation Benchmarks', () => {
     beforeAll(async () => {
       const { networkUrl } = DEVNET_CONFIG;
       provider = await Provider.create(networkUrl);
+      const wallet = Wallet.fromPrivateKey(
+        process.env.DEVNET_WALLET_PVT_KEY as BytesLike,
+        provider
+      );
 
       setup(provider);
+
+      const contractFactory = new CallTestContractFactory(wallet);
+      const { waitForResult } = await contractFactory.deploy();
+      const { contract: deployedContract } = await waitForResult();
+      contract = deployedContract;
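+      // Assumes DEVNET_WALLET_PVT_KEY names a funded devnet account
+      // (typically supplied through CI secrets); deploying a fresh
+      // CallTestContract keeps these benches independent of any
+      // pre-existing devnet state.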
     });
   } else {
     beforeEach(async () => {
diff --git a/internal/benchmarks/src/transaction-results.bench.ts b/internal/benchmarks/src/transaction-results.bench.ts
index 3a5d22d358f..477e25436b3 100644
--- a/internal/benchmarks/src/transaction-results.bench.ts
+++ b/internal/benchmarks/src/transaction-results.bench.ts
@@ -84,37 +84,41 @@ describe('Transaction Submission Benchmarks', () => {
     }
   });
 
-  bench('should successfully perform a batch transfer', async () => {
-    const amountToTransfer1 = 989;
-    const amountToTransfer2 = 699;
-    const amountToTransfer3 = 122;
-
-    const transferParams: TransferParams[] = [
-      {
-        destination: receiver1.address,
-        amount: amountToTransfer1,
-        assetId: provider.getBaseAssetId(),
-      },
-      { destination: receiver2.address, amount: amountToTransfer2, assetId: TestAssetId.A.value },
-      { destination: receiver3.address, amount: amountToTransfer3, assetId: TestAssetId.B.value },
-    ];
-
-    const tx = await wallet.batchTransfer(transferParams);
+  bench('should successfully perform a batch transfer 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const amountToTransfer1 = 989;
+      const amountToTransfer2 = 699;
+      const amountToTransfer3 = 122;
 
-    const { isStatusSuccess } = await tx.waitForResult();
+      const transferParams: TransferParams[] = [
+        {
+          destination: receiver1.address,
+          amount: amountToTransfer1,
+          assetId: provider.getBaseAssetId(),
+        },
+        { destination: receiver2.address, amount: amountToTransfer2, assetId: TestAssetId.A.value },
+        { destination: receiver3.address, amount: amountToTransfer3, assetId: TestAssetId.B.value },
+      ];
 
-    expect(isStatusSuccess).toBeTruthy();
+      const tx = await wallet.batchTransfer(transferParams);
+
+      const { isStatusSuccess } = await tx.waitForResult();
+
+      expect(isStatusSuccess).toBeTruthy();
+    }
   });
 
-  bench('should successfully withdraw to the base layer', async () => {
-    const txParams = {
-      witnessLimit: 800,
-      maxFee: 100_000,
-    };
+  bench('should successfully withdraw to the base layer 10 times', async () => {
+    for (let i = 0; i < 10; i++) {
+      const txParams = {
+        witnessLimit: 800,
+        maxFee: 100_000,
+      };
 
-    const pendingTx = await wallet.withdrawToBaseLayer(receiver1.address, 500, txParams);
-    const { transaction } = await pendingTx.waitForResult();
+      const pendingTx = await wallet.withdrawToBaseLayer(receiver1.address, 500, txParams);
+      const { transaction } = await pendingTx.waitForResult();
 
-    expect(transaction).toBeDefined();
+      expect(transaction).toBeDefined();
+    }
   });
 });
diff --git a/internal/benchmarks/test/fixtures/forc-projects/Forc.toml b/internal/benchmarks/test/fixtures/forc-projects/Forc.toml
index 2db5c4ff425..33f77861792 100644
--- a/internal/benchmarks/test/fixtures/forc-projects/Forc.toml
+++ b/internal/benchmarks/test/fixtures/forc-projects/Forc.toml
@@ -1,2 +1,7 @@
 [workspace]
-members = ["call-test-contract", "counter-contract"]
+members = [
+    "call-test-contract",
+    "counter-contract",
+    "pyth-contract",
+    "pyth-interface",
+]
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/Forc.toml b/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/Forc.toml
new file mode 100644
index 00000000000..01cffae12f1
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/Forc.toml
@@ -0,0 +1,10 @@
+[project]
+authors = ["Fuel Labs <contact@fuel.sh>"]
+entry = "main.sw"
+license = "Apache-2.0"
+name = "pyth-contract"
+
+[dependencies]
+pyth_interface = { path = "../pyth-interface" }
+standards = { git = "https://github.com/FuelLabs/sway-standards", tag = "v0.4.4" }
+sway_libs = { git = "https://github.com/FuelLabs/sway-libs", tag = "v0.21.0" }
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/src/main.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/src/main.sw
new file mode 100644
index 00000000000..8df0945616e
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-contract/src/main.sw
@@ -0,0 +1,934 @@
+contract;
+
+use std::{
+    asset_id::AssetId,
+    block::timestamp,
+    bytes::Bytes,
call_frames::msg_asset_id, + constants::{ + ZERO_B256, + }, + context::msg_amount, + hash::{ + Hash, + keccak256, + sha256, + }, + revert::revert, + storage::{ + storage_map::StorageMap, + storage_vec::*, + }, +}; + +use pyth_interface::{ + data_structures::{ + batch_attestation_update::*, + data_source::*, + governance_instruction::*, + governance_payload::*, + price::*, + update_type::UpdateType, + wormhole_light::*, + }, + errors::{ + PythError, + WormholeError, + }, + events::{ + ConstructedEvent, + ContractUpgradedEvent, + DataSourcesSetEvent, + FeeSetEvent, + GovernanceDataSourceSetEvent, + NewGuardianSetEvent, + UpdatedPriceFeedsEvent, + ValidPeriodSetEvent, + }, + pyth_merkle_proof::validate_proof, + PythCore, + PythInfo, + PythInit, + utils::total_fee, + WormholeGuardians, +}; + +use sway_libs::ownership::*; +use standards::src5::{SRC5, State}; + +const GUARDIAN_SET_EXPIRATION_TIME_SECONDS: u64 = 86400; // 24 hours in seconds +configurable { + DEPLOYER: Identity = Identity::Address(Address::from(ZERO_B256)), +} + +storage { + // | | + // --+-- PYTH STATE --+-- + // | | + // (chainId, emitterAddress) => isValid; takes advantage of + // constant-time mapping lookup for VM verification + is_valid_data_source: StorageMap = StorageMap {}, + // Mapping of cached price information + // priceId => PriceInfo + latest_price_feed: StorageMap = StorageMap {}, + // Fee required for each update + single_update_fee: u64 = 0, + // For tracking all active emitter/chain ID pairs + valid_data_sources: StorageVec = StorageVec {}, + /// Maximum acceptable time period before price is considered to be stale. + /// This includes attestation delay, block time, and potential clock drift + /// between the source/target chains. + valid_time_period_seconds: u64 = 0, + /// Governance data source. VAA messages from this source can change this contract + /// state. e.g., upgrade the contract, change the valid data sources, and more. + governance_data_source: DataSource = DataSource { + chain_id: 0u16, + emitter_address: ZERO_B256, + }, + /// Index of the governance data source, increased each time the governance data source changes. + governance_data_source_index: u32 = 0, + /// Sequence number of the last executed governance message. Any governance message + /// with a lower or equal sequence number will be discarded. This prevents double-execution, + /// and also makes sure that messages are executed in the right order. 
+ last_executed_governance_sequence: u64 = 0, + /// Chain ID of the contract + chain_id: u16 = 0, + /// | | + /// --+-- WORMHOLE STATE --+-- + /// | | + /// Mapping of consumed governance actions + wormhole_consumed_governance_actions: StorageMap = StorageMap {}, + /// Mapping of guardian_set_index => guardian set + wormhole_guardian_sets: StorageMap = StorageMap {}, + /// Current active guardian set index + wormhole_guardian_set_index: u32 = 0, + /// Using Ethereum's Wormhole governance + wormhole_governance_data_source: DataSource = DataSource { + chain_id: 0u16, + emitter_address: ZERO_B256, + }, + /// | | + /// --+-- GOVERNANCE STATE --+-- + /// | | + current_implementation: Identity = Identity::Address(Address::from(ZERO_B256)), +} + +impl SRC5 for Contract { + #[storage(read)] + fn owner() -> State { + _owner() + } +} + +impl PythCore for Contract { + #[storage(read)] + fn ema_price(price_feed_id: PriceFeedId) -> Price { + ema_price_no_older_than(valid_time_period(), price_feed_id) + } + + #[storage(read)] + fn ema_price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price { + ema_price_no_older_than(time_period, price_feed_id) + } + + #[storage(read)] + fn ema_price_unsafe(price_feed_id: PriceFeedId) -> Price { + ema_price_unsafe(price_feed_id) + } + + #[storage(read), payable] + fn parse_price_feed_updates( + max_publish_time: u64, + min_publish_time: u64, + target_price_feed_ids: Vec, + update_data: Vec, + ) -> Vec { + require( + msg_asset_id() == AssetId::base(), + PythError::FeesCanOnlyBePaidInTheBaseAsset, + ); + + let required_fee = update_fee(update_data); + require(msg_amount() >= required_fee, PythError::InsufficientFee); + + let mut output_price_feeds: Vec = Vec::with_capacity(target_price_feed_ids.len()); + let mut i = 0; + while i < update_data.len() { + let data = update_data.get(i).unwrap(); + + match UpdateType::determine_type(data) { + UpdateType::Accumulator(accumulator_update) => { + let (mut offset, digest, number_of_updates, encoded) = accumulator_update.verify_and_parse( + current_guardian_set_index(), + storage + .wormhole_guardian_sets, + storage + .is_valid_data_source, + ); + let mut i_2 = 0; + while i_2 < number_of_updates { + let (new_offset, price_feed) = PriceFeed::extract_from_merkle_proof(digest, encoded, offset); + + offset = new_offset; + + if price_feed.id.is_target(target_price_feed_ids) == false { + i_2 += 1; + continue; + } + + if price_feed.price.publish_time >= min_publish_time && price_feed.price.publish_time <= max_publish_time { + // check if output_price_feeds already contains a PriceFeed with price_feed.id, if so continue as we only want 1 + // output PriceFeed per target ID + if price_feed.id.is_contained_within(output_price_feeds) { + i_2 += 1; + continue; + } + + output_price_feeds.push(price_feed) + } + + i_2 += 1; + } + require(offset == encoded.len(), PythError::InvalidUpdateDataLength); + }, + UpdateType::BatchAttestation(batch_attestation_update) => { + let vm = WormholeVM::parse_and_verify_pyth_vm( + current_guardian_set_index(), + batch_attestation_update + .data, + storage + .wormhole_guardian_sets, + storage + .is_valid_data_source, + ); + + let (mut attestation_index, number_of_attestations, attestation_size) = parse_and_verify_batch_attestation_header(vm.payload); + let attestation_size_u16 = attestation_size.as_u64(); + + let mut i_2: u16 = 0; + while i_2 < number_of_attestations { + let (_, slice) = vm.payload.split_at(attestation_index + 32); + let (price_feed_id, _) = slice.split_at(32); + let 
price_feed_id: PriceFeedId = price_feed_id.into(); + + if price_feed_id.is_target(target_price_feed_ids) == false { + attestation_index += attestation_size_u16; + i_2 += 1; + continue; + } + + let price_feed = PriceFeed::parse_attestation(attestation_size, vm.payload, attestation_index); + + if price_feed.price.publish_time >= min_publish_time && price_feed.price.publish_time <= max_publish_time { + // check if output_price_feeds already contains a PriceFeed with price_feed.id, if so continue; + // as we only want 1 output PriceFeed per target ID + if price_feed.id.is_contained_within(output_price_feeds) { + attestation_index += attestation_size_u16; + i_2 += 1; + continue; + } + + output_price_feeds.push(price_feed) + } + + attestation_index += attestation_size_u16; + i_2 += 1; + } + } + } + + i += 1; + } + + require( + target_price_feed_ids + .len() == output_price_feeds + .len(), + PythError::PriceFeedNotFoundWithinRange, + ); + + output_price_feeds + } + + #[storage(read)] + fn price(price_feed_id: PriceFeedId) -> Price { + price_no_older_than(valid_time_period(), price_feed_id) + } + + #[storage(read)] + fn price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price { + price_no_older_than(time_period, price_feed_id) + } + + #[storage(read)] + fn price_unsafe(price_feed_id: PriceFeedId) -> Price { + price_unsafe(price_feed_id) + } + + #[storage(read)] + fn update_fee(update_data: Vec) -> u64 { + update_fee(update_data) + } + + #[storage(read, write), payable] + fn update_price_feeds(update_data: Vec) { + update_price_feeds(update_data) + } + + #[storage(read, write), payable] + fn update_price_feeds_if_necessary( + price_feed_ids: Vec, + publish_times: Vec, + update_data: Vec, + ) { + require( + price_feed_ids + .len() == publish_times + .len(), + PythError::LengthOfPriceFeedIdsAndPublishTimesMustMatch, + ); + + let mut i = 0; + while i < price_feed_ids.len() { + if latest_publish_time(price_feed_ids.get(i).unwrap()) < publish_times.get(i).unwrap() + { + update_price_feeds(update_data); + return; + } + + i += 1; + } + } + + #[storage(read)] + fn valid_time_period() -> u64 { + valid_time_period() + } +} + +/// PythCore Private Functions /// +#[storage(read)] +fn ema_price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price { + let price = ema_price_unsafe(price_feed_id); + let current_time = timestamp(); + require( + current_time - price.publish_time <= time_period, + PythError::OutdatedPrice, + ); + + price +} + +#[storage(read)] +fn ema_price_unsafe(price_feed_id: PriceFeedId) -> Price { + let price_feed = storage.latest_price_feed.get(price_feed_id).try_read(); + require(price_feed.is_some(), PythError::PriceFeedNotFound); + + price_feed.unwrap().ema_price +} + +#[storage(read)] +fn price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price { + let price = price_unsafe(price_feed_id); + let current_time = timestamp(); + require( + current_time - price.publish_time <= time_period, + PythError::OutdatedPrice, + ); + + price +} + +#[storage(read)] +fn price_unsafe(price_feed_id: PriceFeedId) -> Price { + let price_feed = storage.latest_price_feed.get(price_feed_id).try_read(); + require(price_feed.is_some(), PythError::PriceFeedNotFound); + + price_feed.unwrap().price +} + +#[storage(read)] +fn update_fee(update_data: Vec) -> u64 { + let mut total_number_of_updates = 0; + let mut i = 0; + while i < update_data.len() { + let data = update_data.get(i).unwrap(); + + match UpdateType::determine_type(data) { + 
UpdateType::Accumulator(accumulator_update) => { + let proof_size_offset = accumulator_update.verify(); + + total_number_of_updates += accumulator_update.total_updates(proof_size_offset); + }, + UpdateType::BatchAttestation => { + total_number_of_updates += 1; + }, + } + + i += 1; + } + + total_fee(total_number_of_updates, storage.single_update_fee) +} + +#[storage(read, write), payable] +fn update_price_feeds(update_data: Vec) { + require( + msg_asset_id() == AssetId::base(), + PythError::FeesCanOnlyBePaidInTheBaseAsset, + ); + + let mut total_number_of_updates = 0; + + // let mut updated_price_feeds: Vec = Vec::new(); // TODO: requires append for Vec + let mut i = 0; + while i < update_data.len() { + let data = update_data.get(i).unwrap(); + + match UpdateType::determine_type(data) { + UpdateType::Accumulator(accumulator_update) => { + let (number_of_updates, _updated_ids) = accumulator_update.update_price_feeds( + current_guardian_set_index(), + storage + .wormhole_guardian_sets, + storage + .latest_price_feed, + storage + .is_valid_data_source, + ); + // updated_price_feeds.append(updated_ids); // TODO: requires append for Vec + total_number_of_updates += number_of_updates; + }, + UpdateType::BatchAttestation(batch_attestation_update) => { + let _updated_ids = batch_attestation_update.update_price_feeds( + current_guardian_set_index(), + storage + .wormhole_guardian_sets, + storage + .latest_price_feed, + storage + .is_valid_data_source, + ); + // updated_price_feeds.append(updated_ids); // TODO: requires append for Vec + total_number_of_updates += 1; + }, + } + + i += 1; + } + + let required_fee = total_fee(total_number_of_updates, storage.single_update_fee); + require(msg_amount() >= required_fee, PythError::InsufficientFee); + + // log(UpdatedPriceFeedsEvent { // TODO: requires append for Vec + // updated_price_feeds, + // }) +} + +#[storage(read)] +fn valid_time_period() -> u64 { + storage.valid_time_period_seconds.read() +} + +#[storage(read)] +fn governance_data_source() -> DataSource { + storage.governance_data_source.read() +} + +#[storage(write)] +fn set_governance_data_source(data_source: DataSource) { + storage.governance_data_source.write(data_source); +} + +#[storage(read)] +fn governance_data_source_index() -> u32 { + storage.governance_data_source_index.read() +} + +#[storage(write)] +fn set_governance_data_source_index(index: u32) { + storage.governance_data_source_index.write(index); +} + +#[storage(read)] +fn last_executed_governance_sequence() -> u64 { + storage.last_executed_governance_sequence.read() +} + +#[storage(write)] +fn set_last_executed_governance_sequence(sequence: u64) { + storage.last_executed_governance_sequence.write(sequence); +} + +#[storage(read)] +fn chain_id() -> u16 { + storage.chain_id.read() +} + +#[storage(read)] +fn current_implementation() -> Identity { + storage.current_implementation.read() +} + +impl PythInit for Contract { + #[storage(read, write)] + fn constructor( + data_sources: Vec, + governance_data_source: DataSource, + wormhole_governance_data_source: DataSource, + single_update_fee: u64, + valid_time_period_seconds: u64, + wormhole_guardian_set_addresses: Vec, + wormhole_guardian_set_index: u32, + chain_id: u16, + ) { + // This function sets the passed identity as the initial owner. https://github.com/FuelLabs/sway-libs/blob/8045a19e3297599750abdf6300c11e9927a29d40/libs/src/ownership.sw#L127-L138 + initialize_ownership(DEPLOYER); + // This function ensures that the sender is the owner. 
https://github.com/FuelLabs/sway-libs/blob/8045a19e3297599750abdf6300c11e9927a29d40/libs/src/ownership.sw#L59-L65 + only_owner(); + + require(data_sources.len() > 0, PythError::InvalidDataSourcesLength); + + let mut i = 0; + while i < data_sources.len() { + let data_source = data_sources.get(i).unwrap(); + storage.is_valid_data_source.insert(data_source, true); + storage.valid_data_sources.push(data_source); + + i += 1; + } + storage + .latest_price_feed + .write(StorageMap:: {}); + + storage + .valid_time_period_seconds + .write(valid_time_period_seconds); + storage.single_update_fee.write(single_update_fee); + + let guardian_length: u8 = wormhole_guardian_set_addresses.len().try_as_u8().unwrap(); + let mut new_guardian_set = StorageGuardianSet::new( + 0, + StorageKey::>::new( + sha256(("guardian_set_keys", wormhole_guardian_set_index)), + 0, + ZERO_B256, + ), + ); + let mut i: u8 = 0; + while i < guardian_length { + let key: b256 = wormhole_guardian_set_addresses.get(i.as_u64()).unwrap(); + new_guardian_set.keys.push(key); + i += 1; + } + + storage + .wormhole_guardian_set_index + .write(wormhole_guardian_set_index); + storage + .wormhole_guardian_sets + .insert(wormhole_guardian_set_index, new_guardian_set); + + storage.governance_data_source.write(governance_data_source); + storage + .wormhole_governance_data_source + .write(wormhole_governance_data_source); + storage.governance_data_source_index.write(0); + storage + .wormhole_consumed_governance_actions + .write(StorageMap:: {}); + storage.chain_id.write(chain_id); + storage.last_executed_governance_sequence.write(0); + + storage + .current_implementation + .write(Identity::Address(Address::from(ZERO_B256))); + + // This function revokes ownership of the current owner and disallows any new owners. 
https://github.com/FuelLabs/sway-libs/blob/8045a19e3297599750abdf6300c11e9927a29d40/libs/src/ownership.sw#L89-L99 + renounce_ownership(); + + log(ConstructedEvent { + guardian_set_index: wormhole_guardian_set_index, + }) + } +} + +impl PythInfo for Contract { + #[storage(read)] + fn valid_data_sources() -> Vec { + storage.valid_data_sources.load_vec() + } + + #[storage(read)] + fn latest_publish_time(price_feed_id: PriceFeedId) -> u64 { + latest_publish_time(price_feed_id) + } + + #[storage(read)] + fn price_feed_exists(price_feed_id: PriceFeedId) -> bool { + match storage.latest_price_feed.get(price_feed_id).try_read() { + Some(_) => true, + None => false, + } + } + + #[storage(read)] + fn price_feed_unsafe(price_feed_id: PriceFeedId) -> PriceFeed { + let price_feed = storage.latest_price_feed.get(price_feed_id).try_read(); + require(price_feed.is_some(), PythError::PriceFeedNotFound); + price_feed.unwrap() + } + + #[storage(read)] + fn single_update_fee() -> u64 { + storage.single_update_fee.read() + } + + #[storage(read)] + fn is_valid_data_source(data_source: DataSource) -> bool { + data_source.is_valid_data_source(storage.is_valid_data_source) + } + + #[storage(read)] + fn last_executed_governance_sequence() -> u64 { + last_executed_governance_sequence() + } + + #[storage(read)] + fn chain_id() -> u16 { + chain_id() + } +} + +/// PythInfo Private Functions /// +#[storage(read)] +fn latest_publish_time(price_feed_id: PriceFeedId) -> u64 { + match storage.latest_price_feed.get(price_feed_id).try_read() { + Some(price_feed) => price_feed.price.publish_time, + None => 0, + } +} + +impl WormholeGuardians for Contract { + #[storage(read)] + fn current_guardian_set_index() -> u32 { + current_guardian_set_index() + } + + #[storage(read)] + fn current_wormhole_provider() -> DataSource { + current_wormhole_provider() + } + + #[storage(read)] + fn guardian_set(index: u32) -> GuardianSet { + let stored_guardian_set = storage.wormhole_guardian_sets.get(index).try_read(); + require( + stored_guardian_set + .is_some(), + PythError::GuardianSetNotFound, + ); + GuardianSet::from_stored(stored_guardian_set.unwrap()) + } + + #[storage(read)] + fn governance_action_is_consumed(governance_action_hash: b256) -> bool { + governance_action_is_consumed(governance_action_hash) + } + + #[storage(read, write)] + fn submit_new_guardian_set(encoded_vm: Bytes) { + submit_new_guardian_set(encoded_vm) + } +} + +/// WormholeGuardians Private Functions /// +#[storage(read)] +fn current_guardian_set_index() -> u32 { + storage.wormhole_guardian_set_index.read() +} + +#[storage(read)] +fn current_wormhole_provider() -> DataSource { + storage.wormhole_governance_data_source.read() +} + +#[storage(read)] +fn governance_action_is_consumed(governance_action_hash: b256) -> bool { + match storage.wormhole_consumed_governance_actions.get(governance_action_hash).try_read() { + Some(bool_) => bool_, + None => false, + } +} + +#[storage(read, write)] +fn submit_new_guardian_set(encoded_vm: Bytes) { + let vm: WormholeVM = WormholeVM::parse_and_verify_wormhole_vm( + current_guardian_set_index(), + encoded_vm, + storage + .wormhole_guardian_sets, + ); + require( + vm.guardian_set_index == current_guardian_set_index(), + WormholeError::NotSignedByCurrentGuardianSet, + ); + let current_wormhole_provider: DataSource = current_wormhole_provider(); + require( + vm.emitter_chain_id == current_wormhole_provider + .chain_id, + WormholeError::InvalidGovernanceChain, + ); + require( + vm.emitter_address == current_wormhole_provider + 
.emitter_address, + WormholeError::InvalidGovernanceContract, + ); + require( + governance_action_is_consumed(vm.governance_action_hash) == false, + WormholeError::GovernanceActionAlreadyConsumed, + ); + + let current_guardian_set_index: u32 = current_guardian_set_index(); + let upgrade: GuardianSetUpgrade = GuardianSetUpgrade::parse_encoded_upgrade(current_guardian_set_index, vm.payload); + + storage + .wormhole_consumed_governance_actions + .insert(vm.governance_action_hash, true); + + // Set expiry if current GuardianSet exists + let current_guardian_set = storage.wormhole_guardian_sets.get(current_guardian_set_index).try_read(); + if current_guardian_set.is_some() { + let mut current_guardian_set = current_guardian_set.unwrap(); + current_guardian_set.expiration_time = timestamp() + GUARDIAN_SET_EXPIRATION_TIME_SECONDS; + storage + .wormhole_guardian_sets + .insert(current_guardian_set_index, current_guardian_set); + } + + storage + .wormhole_guardian_sets + .insert(upgrade.new_guardian_set_index, upgrade.new_guardian_set); + storage + .wormhole_guardian_set_index + .write(upgrade.new_guardian_set_index); + + log(NewGuardianSetEvent { + governance_action_hash: vm.governance_action_hash, + new_guardian_set_index: upgrade.new_guardian_set_index, + }) +} + +/// Transfer the governance data source to a new value with sanity checks to ensure the new governance data source can manage the contract. +#[storage(read, write)] +fn authorize_governance_data_source_transfer( + payload: AuthorizeGovernanceDataSourceTransferPayload, +) { + let old_governance_data_source = governance_data_source(); + + // Parse and verify the VAA contained in the payload to ensure it's valid and can manage the contract + let vm: WormholeVM = WormholeVM::parse_and_verify_wormhole_vm( + current_guardian_set_index(), + payload + .claim_vaa, + storage + .wormhole_guardian_sets, + ); + + let gi = GovernanceInstruction::parse_governance_instruction(vm.payload); + require( + gi.target_chain_id == chain_id() || gi.target_chain_id == 0, + PythError::InvalidGovernanceTarget, + ); + + require( + match gi.action { + GovernanceAction::RequestGovernanceDataSourceTransfer => true, + _ => false, + }, + PythError::InvalidGovernanceMessage, + ); + + let claim_payload = GovernanceInstruction::parse_request_governance_data_source_transfer_payload(gi.payload); + + require( + governance_data_source_index() < claim_payload + .governance_data_source_index, + PythError::OldGovernanceMessage, + ); + + set_governance_data_source_index(claim_payload.governance_data_source_index); + + let new_governance_data_source = DataSource { + chain_id: vm.emitter_chain_id, + emitter_address: vm.emitter_address, + }; + + set_governance_data_source(new_governance_data_source); + + // Setting the last executed governance to the claimVaa sequence to avoid using older sequences. 
+ set_last_executed_governance_sequence(vm.sequence); + + log(GovernanceDataSourceSetEvent { + old_data_source: old_governance_data_source, + new_data_source: new_governance_data_source, + initial_sequence: vm.sequence, + }); +} + +#[storage(read, write)] +fn set_data_sources(payload: SetDataSourcesPayload) { + let old_data_sources = storage.valid_data_sources.load_vec(); + + let mut i = 0; + while i < old_data_sources.len() { + let data_source = old_data_sources.get(i).unwrap(); + storage.is_valid_data_source.insert(data_source, false); + i += 1; + } + + // Clear the current list of valid data sources + storage.valid_data_sources.clear(); + + i = 0; + // Add new data sources from the payload and mark them as valid + while i < payload.data_sources.len() { + let data_source = payload.data_sources.get(i).unwrap(); + storage.valid_data_sources.push(data_source); + storage.is_valid_data_source.insert(data_source, true); + + i += 1; + } + + // Emit an event with the old and new data sources + log(DataSourcesSetEvent { + old_data_sources: old_data_sources, + new_data_sources: storage.valid_data_sources.load_vec(), + }); +} + +#[storage(read, write)] +fn set_fee(payload: SetFeePayload) { + let old_fee = storage.single_update_fee.read(); + storage.single_update_fee.write(payload.new_fee); + + log(FeeSetEvent { + old_fee, + new_fee: payload.new_fee, + }); +} + +#[storage(read, write)] +fn set_valid_period(payload: SetValidPeriodPayload) { + let old_valid_period = storage.valid_time_period_seconds.read(); + storage + .valid_time_period_seconds + .write(payload.new_valid_period); + + log(ValidPeriodSetEvent { + old_valid_period, + new_valid_period: payload.new_valid_period, + }); +} + +abi PythGovernance { + #[storage(read)] + fn governance_data_source() -> DataSource; + + #[storage(read, write)] + fn execute_governance_instruction(encoded_vm: Bytes); +} + +impl PythGovernance for Contract { + #[storage(read)] + fn governance_data_source() -> DataSource { + governance_data_source() + } + + #[storage(read, write)] + fn execute_governance_instruction(encoded_vm: Bytes) { + execute_governance_instruction(encoded_vm) + } +} + +#[storage(read, write)] +fn execute_governance_instruction(encoded_vm: Bytes) { + let vm = verify_governance_vm(encoded_vm); + // Log so that the WormholeVM struct will show up in the ABI and can be used in the tests + log(vm); + + let gi = GovernanceInstruction::parse_governance_instruction(vm.payload); + // Log so that the GovernanceInstruction struct will show up in the ABI and can be used in the tests + log(gi); + + require( + gi.target_chain_id == chain_id() || gi.target_chain_id == 0, + PythError::InvalidGovernanceTarget, + ); + + match gi.action { + GovernanceAction::UpgradeContract => { + require(gi.target_chain_id != 0, PythError::InvalidGovernanceTarget); + // TODO: implement upgrade_upgradeable_contract(uc) when Fuel releases the upgrade standard library; + log("Upgrade functionality not implemented"); + revert(0u64); + }, + GovernanceAction::AuthorizeGovernanceDataSourceTransfer => { + let agdst = GovernanceInstruction::parse_authorize_governance_data_source_transfer_payload(gi.payload); + log(agdst); + authorize_governance_data_source_transfer(agdst); + }, + GovernanceAction::SetDataSources => { + let sdsp = GovernanceInstruction::parse_set_data_sources_payload(gi.payload); + log(sdsp); + set_data_sources(sdsp); + }, + GovernanceAction::SetFee => { + let sf = GovernanceInstruction::parse_set_fee_payload(gi.payload); + log(sf); + set_fee(sf); + }, + 
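+        // SetValidPeriod rewrites `valid_time_period_seconds`, the staleness
+        // window that `price()` and `ema_price()` enforce via
+        // `price_no_older_than`.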
+        GovernanceAction::SetValidPeriod => {
+            let svp = GovernanceInstruction::parse_set_valid_period_payload(gi.payload);
+            log(svp);
+            set_valid_period(svp);
+        },
+        GovernanceAction::RequestGovernanceDataSourceTransfer => {
+            // RequestGovernanceDataSourceTransfer can only be part of an AuthorizeGovernanceDataSourceTransfer message.
+            // The `revert` function only accepts u64, so as
+            // a workaround we use require.
+            require(false, PythError::InvalidGovernanceMessage);
+        },
+        _ => {
+            // The `revert` function only accepts u64, so as
+            // a workaround we use require.
+            require(false, PythError::InvalidGovernanceMessage);
+        }
+    }
+}
+
+#[storage(read, write)]
+fn verify_governance_vm(encoded_vm: Bytes) -> WormholeVM {
+    let vm: WormholeVM = WormholeVM::parse_and_verify_wormhole_vm(
+        current_guardian_set_index(),
+        encoded_vm,
+        storage
+            .wormhole_guardian_sets,
+    );
+
+    require(
+        storage
+            .governance_data_source
+            .read()
+            .is_valid_governance_data_source(vm.emitter_chain_id, vm.emitter_address),
+        PythError::InvalidGovernanceDataSource,
+    );
+
+    require(
+        vm.sequence > last_executed_governance_sequence(),
+        PythError::OldGovernanceMessage,
+    );
+
+    set_last_executed_governance_sequence(vm.sequence);
+    vm
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/Forc.toml b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/Forc.toml
new file mode 100644
index 00000000000..7cb44a4edc7
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/Forc.toml
@@ -0,0 +1,8 @@
+[project]
+authors = ["Fuel Labs <contact@fuel.sh>"]
+entry = "interface.sw"
+license = "Apache-2.0"
+name = "pyth_interface"
+
+[dependencies]
+standards = { git = "https://github.com/FuelLabs/sway-standards", tag = "v0.4.4" }
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures.sw
new file mode 100644
index 00000000000..4e7d6ddb91f
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures.sw
@@ -0,0 +1,11 @@
+library;
+
+// The order of the modules is important because of the dependencies between them.
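+// For example, `accumulator_update` and `batch_attestation_update` both import
+// from `data_source`, `price`, and `wormhole_light`, which are declared first.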
+pub mod data_source; +pub mod wormhole_light; +pub mod price; +pub mod accumulator_update; +pub mod batch_attestation_update; +pub mod governance_payload; +pub mod governance_instruction; +pub mod update_type; diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/accumulator_update.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/accumulator_update.sw new file mode 100644 index 00000000000..014cee89756 --- /dev/null +++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/accumulator_update.sw @@ -0,0 +1,136 @@ +library; + +use ::errors::PythError; +use ::data_structures::{data_source::*, price::*, wormhole_light::{StorageGuardianSet, WormholeVM}}; +use std::{bytes::Bytes, hash::Hash}; + +pub struct AccumulatorUpdate { + data: Bytes, +} +const MINIMUM_ALLOWED_MINOR_VERSION = 0; +const MAJOR_VERSION = 1; +impl AccumulatorUpdate { + pub fn new(data: Bytes) -> Self { + Self { data } + } + pub fn total_updates(self, ref mut offset: u64) -> u64 { + let proof_size = u16::from_be_bytes([self.data.get(offset).unwrap(), self.data.get(offset + 1).unwrap()]).as_u64(); + offset += proof_size + 2; + self.data.get(offset).unwrap().as_u64() + } + pub fn verify(self) -> u64 { + // skip magic as already checked when this is called + let major_version = self.data.get(4); + require( + major_version + .is_some() && major_version + .unwrap() == MAJOR_VERSION, + PythError::InvalidMajorVersion, + ); + let minor_version = self.data.get(5); + require( + minor_version + .is_some() && minor_version + .unwrap() >= MINIMUM_ALLOWED_MINOR_VERSION, + PythError::InvalidMinorVersion, + ); + let trailing_header_size = self.data.get(6); + require(trailing_header_size.is_some(), PythError::InvalidHeaderSize); + // skip trailing headers and update type + let offset = 8 + trailing_header_size.unwrap().as_u64(); + require( + self.data + .len() >= offset, + PythError::InvalidUpdateDataLength, + ); + offset + } +} +impl AccumulatorUpdate { + #[storage(read)] + pub fn verify_and_parse( + self, + current_guardian_set_index: u32, + wormhole_guardian_sets: StorageKey>, + is_valid_data_source: StorageKey>, +) -> (u64, Bytes, u64, Bytes) { + let encoded_offset = self.verify(); + let (_, slice) = self.data.split_at(encoded_offset); + let (encoded_slice, _) = slice.split_at(self.data.len() - encoded_offset); + let mut offset = 0; + let wormhole_proof_size = u16::from_be_bytes([encoded_slice.get(offset).unwrap(), encoded_slice.get(offset + 1).unwrap()]).as_u64(); + offset += 2; + let (_, slice) = encoded_slice.split_at(offset); + let (encoded_vm, _) = slice.split_at(wormhole_proof_size); + let vm = WormholeVM::parse_and_verify_pyth_vm( + current_guardian_set_index, + encoded_vm, + wormhole_guardian_sets, + is_valid_data_source, + ); + offset += wormhole_proof_size; + let encoded_payload = vm.payload; + /* + Payload offset: + skip magic (4 bytes) as already checked when this is called + skip update_type as (1 byte) it can only be WormholeMerkle + skip slot (8 bytes) as unused + skip ring_size (4 bytes) as unused + */ + let mut payload_offset = 17; + let (_, slice) = encoded_payload.split_at(payload_offset); + let (digest, _) = slice.split_at(20); + payload_offset += 20; + require( + payload_offset <= encoded_payload + .len(), + PythError::InvalidPayloadLength, + ); + let number_of_updates = encoded_slice.get(offset); + require( + number_of_updates + .is_some(), + PythError::NumberOfUpdatesIrretrievable, + ); + offset += 1; + 
(offset, digest, number_of_updates.unwrap().as_u64(), encoded_slice) + } +} +impl AccumulatorUpdate { + #[storage(read, write)] + pub fn update_price_feeds( + self, + current_guardian_set_index: u32, + wormhole_guardian_sets: StorageKey>, + latest_price_feed: StorageKey>, + is_valid_data_source: StorageKey>, +) -> (u64, Vec) { + let (mut offset, digest, number_of_updates, encoded_data) = self.verify_and_parse( + current_guardian_set_index, + wormhole_guardian_sets, + is_valid_data_source, + ); + + let mut updated_ids = Vec::new(); + let mut i = 0; + while i < number_of_updates { + let (new_offset, price_feed) = PriceFeed::extract_from_merkle_proof(digest, encoded_data, offset); + offset = new_offset; + let latest_publish_time = match latest_price_feed.get(price_feed.id).try_read() { + Some(price_feed) => price_feed.price.publish_time, + None => 0, + }; + if price_feed.price.publish_time > latest_publish_time { + latest_price_feed.insert(price_feed.id, price_feed); + updated_ids.push(price_feed.id); + } + i += 1; + } + require( + offset == encoded_data + .len(), + PythError::InvalidUpdateDataLength, + ); + (number_of_updates, updated_ids) + } +} diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/batch_attestation_update.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/batch_attestation_update.sw new file mode 100644 index 00000000000..f1ef0a21b81 --- /dev/null +++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/batch_attestation_update.sw @@ -0,0 +1,94 @@ +library; + +use ::errors::PythError; +use ::data_structures::{data_source::*, price::*, wormhole_light::{StorageGuardianSet, WormholeVM}}; +use std::{bytes::Bytes, hash::Hash}; + +const BATCH_MAGIC: u32 = 0x50325748; + +pub struct BatchAttestationUpdate { + pub data: Bytes, +} +impl BatchAttestationUpdate { + pub fn new(data: Bytes) -> Self { + Self { data } + } + #[storage(read, write)] + pub fn update_price_feeds( + self, + current_guardian_set_index: u32, + wormhole_guardian_sets: StorageKey>, + latest_price_feed: StorageKey>, + is_valid_data_source: StorageKey>, +) -> Vec { + let vm = WormholeVM::parse_and_verify_pyth_vm( + current_guardian_set_index, + self.data, + wormhole_guardian_sets, + is_valid_data_source, + ); + let (mut attestation_index, number_of_attestations, attestation_size) = parse_and_verify_batch_attestation_header(vm.payload); + let mut updated_ids = Vec::new(); + let mut i: u16 = 0; + while i < number_of_attestations { + let price_feed = PriceFeed::parse_attestation(attestation_size, vm.payload, attestation_index); + // Respect specified attestation size for forward-compatibility + attestation_index += attestation_size.as_u64(); + let latest_publish_time = match latest_price_feed.get(price_feed.id).try_read() { + Some(price_feed) => price_feed.price.publish_time, + None => 0, + }; + if price_feed.price.publish_time > latest_publish_time { + latest_price_feed.insert(price_feed.id, price_feed); + updated_ids.push(price_feed.id); + } + i += 1; + } + updated_ids + } +} +pub fn parse_and_verify_batch_attestation_header(encoded_payload: Bytes) -> (u64, u16, u16) { + let mut index = 0; + //Check header + let magic = u32::from_be_bytes([ + encoded_payload.get(index).unwrap(), + encoded_payload.get(index + 1).unwrap(), + encoded_payload.get(index + 2).unwrap(), + encoded_payload.get(index + 3).unwrap(), + ]); + require(magic == BATCH_MAGIC, PythError::InvalidMagic); + index += 4; + let major_version = 
u16::from_be_bytes([encoded_payload.get(index).unwrap(), encoded_payload.get(index + 1).unwrap()]);
+    require(major_version == 3, PythError::InvalidMajorVersion);
+    // additionally skip minor_version (2 bytes) as unused
+    index += 4;
+    let header_size = u16::from_be_bytes([encoded_payload.get(index).unwrap(), encoded_payload.get(index + 1).unwrap()]);
+    index += 2;
+    // From solidity impl:
+    // NOTE(2022-04-19): Currently, only payloadId comes after
+    // hdrSize. Future extra header fields must be read using a
+    // separate offset to respect hdrSize, i.e.:
+    // uint hdrIndex = 0;
+    // bpa.header.payloadId = UnsafeBytesLib.toUint8(encoded, index + hdrIndex);
+    // hdrIndex += 1;
+    // bpa.header.someNewField = UnsafeBytesLib.toUint32(encoded, index + hdrIndex);
+    // hdrIndex += 4;
+    // Skip remaining unknown header bytes
+    // index += bpa.header.hdrSize;
+    let payload_id = encoded_payload.get(index).unwrap();
+    // Payload ID of 2 required for batch header
+    require(payload_id == 2, PythError::InvalidPayloadId);
+    // Skip remaining unknown header bytes
+    index += header_size.as_u64();
+    let number_of_attestations = u16::from_be_bytes([encoded_payload.get(index).unwrap(), encoded_payload.get(index + 1).unwrap()]);
+    index += 2;
+    let attestation_size = u16::from_be_bytes([encoded_payload.get(index).unwrap(), encoded_payload.get(index + 1).unwrap()]);
+    index += 2;
+    require(
+        encoded_payload
+            .len() == index + (attestation_size * number_of_attestations)
+            .as_u64(),
+        PythError::InvalidPayloadLength,
+    );
+    return (index, number_of_attestations, attestation_size);
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/data_source.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/data_source.sw
new file mode 100644
index 00000000000..2cb65cd0b19
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/data_source.sw
@@ -0,0 +1,39 @@
+library;
+
+use std::hash::{Hash, Hasher};
+
+pub struct DataSource {
+    pub chain_id: u16,
+    pub emitter_address: b256,
+}
+
+impl Hash for DataSource {
+    fn hash(self, ref mut state: Hasher) {
+        self.chain_id.hash(state);
+        self.emitter_address.hash(state);
+    }
+}
+
+impl DataSource {
+    pub fn new(chain_id: u16, emitter_address: b256) -> Self {
+        Self {
+            chain_id,
+            emitter_address,
+        }
+    }
+
+    #[storage(read)]
+    pub fn is_valid_data_source(
+        self,
+        is_valid_data_source: StorageKey<StorageMap<DataSource, bool>>,
+) -> bool {
+        match is_valid_data_source.get(self).try_read() {
+            Some(bool) => bool,
+            None => false,
+        }
+    }
+
+    pub fn is_valid_governance_data_source(self, chain_id: u16, emitter_address: b256) -> bool {
+        self.chain_id == chain_id && self.emitter_address == emitter_address
+    }
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_instruction.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_instruction.sw
new file mode 100644
index 00000000000..645b440bedb
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_instruction.sw
@@ -0,0 +1,242 @@
+library;
+
+use ::errors::PythError;
+use ::data_structures::{
+    data_source::*,
+    governance_payload::*,
+    price::*,
+    wormhole_light::{
+        StorageGuardianSet,
+        WormholeVM,
+    },
+};
+use std::{bytes::Bytes, hash::Hash};
+use std::math::*;
+use std::primitive_conversions::{u32::*, u64::*};
+
+pub const MAGIC: u32 = 0x5054474d;
+
+pub struct
GovernanceInstruction { + pub magic: u32, + pub module: GovernanceModule, + pub action: GovernanceAction, + pub target_chain_id: u16, + pub payload: Bytes, +} + +pub enum GovernanceModule { + Executor: (), // 0 + Target: (), // 1 + EvmExecutor: (), // 2 + StacksTarget: (), // 3 + Invalid: (), +} + +pub enum GovernanceAction { + UpgradeContract: (), // 0 + AuthorizeGovernanceDataSourceTransfer: (), // 1 + SetDataSources: (), // 2 + SetFee: (), // 3 + SetValidPeriod: (), // 4 + RequestGovernanceDataSourceTransfer: (), // 5 + Invalid: (), +} + +impl GovernanceInstruction { + pub fn new( + magic: u32, + module: GovernanceModule, + action: GovernanceAction, + target_chain_id: u16, + payload: Bytes, + ) -> Self { + Self { + magic, + module, + action, + target_chain_id, + payload, + } + } + + pub fn parse_governance_instruction(encoded_instruction: Bytes) -> Self { + let mut index = 0; + let magic = u32::from_be_bytes([ + encoded_instruction.get(index).unwrap(), + encoded_instruction.get(index + 1).unwrap(), + encoded_instruction.get(index + 2).unwrap(), + encoded_instruction.get(index + 3).unwrap(), + ]); + require(magic == MAGIC, PythError::InvalidMagic); + index += 4; + + let mod_number = encoded_instruction.get(index).unwrap(); + let module = match mod_number { + 0 => GovernanceModule::Executor, + 1 => GovernanceModule::Target, + 2 => GovernanceModule::EvmExecutor, + 3 => GovernanceModule::StacksTarget, + _ => GovernanceModule::Invalid, + }; + require( + match module { + GovernanceModule::Target => true, + _ => false, + }, + PythError::InvalidGovernanceTarget, + ); + index += 1; + + let action_number = encoded_instruction.get(index).unwrap(); + let governance_action = match action_number { + 0 => GovernanceAction::UpgradeContract, // Not implemented + 1 => GovernanceAction::AuthorizeGovernanceDataSourceTransfer, + 2 => GovernanceAction::SetDataSources, + 3 => GovernanceAction::SetFee, + 4 => GovernanceAction::SetValidPeriod, + 5 => GovernanceAction::RequestGovernanceDataSourceTransfer, + _ => GovernanceAction::Invalid, + }; + require( + match governance_action { + GovernanceAction::Invalid => false, + _ => true, + }, + PythError::InvalidGovernanceAction, + ); + index += 1; + + let target_chain_id = u16::from_be_bytes([ + encoded_instruction.get(index).unwrap(), + encoded_instruction.get(index + 1).unwrap(), + ]); + index += 2; + + let (_, payload) = encoded_instruction.split_at(index); + + GovernanceInstruction::new(magic, module, governance_action, target_chain_id, payload) + } + + /// Parse an AuthorizeGovernanceDataSourceTransferPayload (action 2) with minimal validation + pub fn parse_authorize_governance_data_source_transfer_payload( + encoded_payload: Bytes, + ) -> AuthorizeGovernanceDataSourceTransferPayload { + AuthorizeGovernanceDataSourceTransferPayload { + claim_vaa: encoded_payload, + } + } + + pub fn parse_request_governance_data_source_transfer_payload( + encoded_payload: Bytes, + ) -> RequestGovernanceDataSourceTransferPayload { + let mut index = 0; + let governance_data_source_index = u32::from_be_bytes([ + encoded_payload.get(index).unwrap(), + encoded_payload.get(index + 1).unwrap(), + encoded_payload.get(index + 2).unwrap(), + encoded_payload.get(index + 3).unwrap(), + ]); + index += 4; + require( + index == encoded_payload + .len(), + PythError::InvalidGovernanceMessage, + ); + let rdgst = RequestGovernanceDataSourceTransferPayload { + governance_data_source_index, + }; + rdgst + } + + pub fn parse_set_data_sources_payload(encoded_payload: Bytes) -> SetDataSourcesPayload { 
+ let mut index = 0; + let data_sources_length = encoded_payload.get(index).unwrap().as_u64(); + index += 1; + let mut data_sources = Vec::with_capacity(data_sources_length); + + let mut i = 0; + while i < data_sources_length { + let (_, slice) = encoded_payload.split_at(index); + let (slice, _) = slice.split_at(2); + let chain_id = u16::from_be_bytes([slice.get(0).unwrap(), slice.get(1).unwrap()]); + index += 2; + let (_, slice) = encoded_payload.split_at(index); + let (slice, _) = slice.split_at(32); + let emitter_address: b256 = slice.into(); + index += 32; + + data_sources.push(DataSource { + chain_id, + emitter_address, + }); + i += 1 + } + + require( + index == encoded_payload + .len(), + PythError::InvalidGovernanceMessage, + ); + let sds = SetDataSourcesPayload { data_sources }; + sds + } + + pub fn parse_set_fee_payload(encoded_payload: Bytes) -> SetFeePayload { + let mut index = 0; + let val = u64::from_be_bytes([ + encoded_payload.get(index).unwrap(), + encoded_payload.get(index + 1).unwrap(), + encoded_payload.get(index + 2).unwrap(), + encoded_payload.get(index + 3).unwrap(), + encoded_payload.get(index + 4).unwrap(), + encoded_payload.get(index + 5).unwrap(), + encoded_payload.get(index + 6).unwrap(), + encoded_payload.get(index + 7).unwrap(), + ]); + index += 8; + let expo = u64::from_be_bytes([ + encoded_payload.get(index).unwrap(), + encoded_payload.get(index + 1).unwrap(), + encoded_payload.get(index + 2).unwrap(), + encoded_payload.get(index + 3).unwrap(), + encoded_payload.get(index + 4).unwrap(), + encoded_payload.get(index + 5).unwrap(), + encoded_payload.get(index + 6).unwrap(), + encoded_payload.get(index + 7).unwrap(), + ]); + index += 8; + require( + encoded_payload + .len() == index, + PythError::InvalidGovernanceMessage, + ); + let sf = SetFeePayload { + new_fee: val * 10u64.pow(expo.try_as_u32().unwrap()), + }; + sf + } + + pub fn parse_set_valid_period_payload(encoded_payload: Bytes) -> SetValidPeriodPayload { + let mut index = 0; + let valid_time_period_seconds = u64::from_be_bytes([ + encoded_payload.get(index).unwrap(), + encoded_payload.get(index + 1).unwrap(), + encoded_payload.get(index + 2).unwrap(), + encoded_payload.get(index + 3).unwrap(), + encoded_payload.get(index + 4).unwrap(), + encoded_payload.get(index + 5).unwrap(), + encoded_payload.get(index + 6).unwrap(), + encoded_payload.get(index + 7).unwrap(), + ]); + index += 8; + require( + index == encoded_payload + .len(), + PythError::InvalidGovernanceMessage, + ); + let svp = SetValidPeriodPayload { + new_valid_period: valid_time_period_seconds, + }; + svp + } +} diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_payload.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_payload.sw new file mode 100644 index 00000000000..c00838a915e --- /dev/null +++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/governance_payload.sw @@ -0,0 +1,29 @@ +library; + +use std::bytes::Bytes; + +use ::data_structures::data_source::DataSource; + +pub struct UpgradeContractPayload { + pub new_implementation: Identity, +} + +pub struct AuthorizeGovernanceDataSourceTransferPayload { + pub claim_vaa: Bytes, +} + +pub struct RequestGovernanceDataSourceTransferPayload { + pub governance_data_source_index: u32, +} + +pub struct SetDataSourcesPayload { + pub data_sources: Vec, +} + +pub struct SetFeePayload { + pub new_fee: u64, +} + +pub struct SetValidPeriodPayload { + pub 
new_valid_period: u64,
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/price.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/price.sw
new file mode 100644
index 00000000000..d3d619d7516
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/price.sw
@@ -0,0 +1,343 @@
+library;
+
+use std::{block::timestamp, bytes::Bytes};
+
+use ::errors::PythError;
+use ::utils::absolute_of_exponent;
+use ::data_structures::wormhole_light::WormholeVM;
+use ::pyth_merkle_proof::validate_proof;
+const TAI64_DIFFERENCE = 4611686018427387904;
+// A price with a degree of uncertainty, represented as a price +- a confidence interval.
+//
+// The confidence interval roughly corresponds to the standard error of a normal distribution.
+// Both the price and confidence are stored in a fixed-point numeric representation,
+// `x * (10^expo)`, where `expo` is the exponent.
+//
+// Please refer to the documentation at https://docs.pyth.network/documentation/pythnet-price-feeds/best-practices
+// for how to use this price safely.
+pub struct Price {
+    // Confidence interval around the price
+    pub confidence: u64,
+    // Price exponent
+    // This value represents the absolute value of an i32 in the range -255 to 0. Values other than 0 should be considered negative:
+    // an exponent of 5 means the Pyth Price exponent was -5
+    pub exponent: u32,
+    // Price
+    pub price: u64,
+    // The TAI64 timestamp describing when the price was published
+    pub publish_time: u64,
+}
+impl Price {
+    pub fn new(
+        confidence: u64,
+        exponent: u32,
+        price: u64,
+        publish_time: u64,
+    ) -> Self {
+        Self {
+            confidence,
+            exponent,
+            price,
+            publish_time,
+        }
+    }
+}
+// The `PriceFeedId` type is an alias for `b256` that represents the id for a specific Pyth price feed.
+pub type PriceFeedId = b256;
+// PriceFeed represents a current aggregate price from Pyth publisher feeds.
+pub struct PriceFeed {
+    // Latest available exponentially-weighted moving average price
+    pub ema_price: Price,
+    // The price ID.
+
+// The `PriceFeedId` type is an alias for `b256` that represents the id for a specific Pyth price feed.
+pub type PriceFeedId = b256;
+
+// PriceFeed represents a current aggregate price from Pyth publisher feeds.
+pub struct PriceFeed {
+    // Latest available exponentially-weighted moving average price
+    pub ema_price: Price,
+    // The price ID.
+    pub id: PriceFeedId,
+    // Latest available price
+    pub price: Price,
+}
+
+impl PriceFeedId {
+    pub fn is_target(self, target_price_feed_ids: Vec<PriceFeedId>) -> bool {
+        let mut i = 0;
+        while i < target_price_feed_ids.len() {
+            if target_price_feed_ids.get(i).unwrap() == self {
+                return true;
+            }
+            i += 1;
+        }
+        false
+    }
+
+    pub fn is_contained_within(self, output_price_feeds: Vec<PriceFeed>) -> bool {
+        let mut i = 0;
+        while i < output_price_feeds.len() {
+            if output_price_feeds.get(i).unwrap().id == self {
+                return true;
+            }
+            i += 1;
+        }
+        false
+    }
+}
+
+impl PriceFeed {
+    pub fn new(ema_price: Price, id: PriceFeedId, price: Price) -> Self {
+        Self {
+            ema_price,
+            id,
+            price,
+        }
+    }
+}
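Editor's note: `parse_message` below walks a fixed big-endian layout. A TypeScript sketch of the same wire format, for reference (offsets taken from the Sway code; the decoder itself is illustrative):

    interface PriceFeedMessage {
      id: Uint8Array;        // [1, 33)  price feed id (b256); byte 0 is the message type
      price: bigint;         // [33, 41)
      confidence: bigint;    // [41, 49)
      exponent: number;      // [49, 53)  i32, expected in [-255, 0]
      publishTime: bigint;   // [53, 61)  UNIX seconds (converted to TAI64 on-chain)
      emaPrice: bigint;      // [69, 77); bytes [61, 69) hold an unused prev_publish_time
      emaConfidence: bigint; // [77, 85)
    }

    function decodePriceFeedMessage(msg: Uint8Array): PriceFeedMessage {
      const view = new DataView(msg.buffer, msg.byteOffset, msg.byteLength);
      return {
        id: msg.slice(1, 33),
        price: view.getBigUint64(33),
        confidence: view.getBigUint64(41),
        exponent: view.getInt32(49),
        publishTime: view.getBigUint64(53),
        emaPrice: view.getBigUint64(69),
        emaConfidence: view.getBigUint64(77),
      };
    }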
+
+impl PriceFeed {
+    pub fn parse_message(encoded_price_feed: Bytes) -> Self {
+        let mut offset = 1u64;
+
+        let (_, slice) = encoded_price_feed.split_at(offset);
+        let (price_feed_id, _) = slice.split_at(32);
+        let price_feed_id: PriceFeedId = price_feed_id.into();
+        offset += 32;
+
+        let price = u64::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+            encoded_price_feed.get(offset + 4).unwrap(),
+            encoded_price_feed.get(offset + 5).unwrap(),
+            encoded_price_feed.get(offset + 6).unwrap(),
+            encoded_price_feed.get(offset + 7).unwrap(),
+        ]);
+        offset += 8;
+
+        let confidence = u64::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+            encoded_price_feed.get(offset + 4).unwrap(),
+            encoded_price_feed.get(offset + 5).unwrap(),
+            encoded_price_feed.get(offset + 6).unwrap(),
+            encoded_price_feed.get(offset + 7).unwrap(),
+        ]);
+        offset += 8;
+
+        // exponent is an i32, expected to be in the range -255 to 0
+        let exponent = u32::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+        ]);
+        let exponent = absolute_of_exponent(exponent);
+        require(exponent < 256u32, PythError::InvalidExponent);
+        offset += 4;
+
+        let mut publish_time = u64::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+            encoded_price_feed.get(offset + 4).unwrap(),
+            encoded_price_feed.get(offset + 5).unwrap(),
+            encoded_price_feed.get(offset + 6).unwrap(),
+            encoded_price_feed.get(offset + 7).unwrap(),
+        ]);
+        // advance past publish_time (8 bytes) and the unused previous_publish_time (8 bytes)
+        offset += 16;
+
+        let ema_price = u64::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+            encoded_price_feed.get(offset + 4).unwrap(),
+            encoded_price_feed.get(offset + 5).unwrap(),
+            encoded_price_feed.get(offset + 6).unwrap(),
+            encoded_price_feed.get(offset + 7).unwrap(),
+        ]);
+        offset += 8;
+
+        let ema_confidence = u64::from_be_bytes([
+            encoded_price_feed.get(offset).unwrap(),
+            encoded_price_feed.get(offset + 1).unwrap(),
+            encoded_price_feed.get(offset + 2).unwrap(),
+            encoded_price_feed.get(offset + 3).unwrap(),
+            encoded_price_feed.get(offset + 4).unwrap(),
+            encoded_price_feed.get(offset + 5).unwrap(),
+            encoded_price_feed.get(offset + 6).unwrap(),
+            encoded_price_feed.get(offset + 7).unwrap(),
+        ]);
+        offset += 8;
+
+        require(offset <= encoded_price_feed.len(), PythError::InvalidPriceFeedDataLength);
+
+        // convert publish_time from UNIX to TAI64
+        publish_time += TAI64_DIFFERENCE;
+        require(publish_time <= timestamp(), PythError::FuturePriceNotAllowed);
+
+        PriceFeed::new(
+            Price::new(ema_confidence, exponent, ema_price, publish_time),
+            price_feed_id,
+            Price::new(confidence, exponent, price, publish_time),
+        )
+    }
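Editor's note: the TAI64 conversion above is a constant offset, since TAI64 labels are seconds relative to 2^62 (leap seconds are ignored in this sketch):

    const TAI64_DIFFERENCE = 4611686018427387904n; // 2n ** 62n, as in price.sw
    const unixToTai64 = (unixSeconds: bigint): bigint => unixSeconds + TAI64_DIFFERENCE;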
+
+    pub fn parse_attestation(attestation_size: u16, encoded_payload: Bytes, index: u64) -> Self {
+        // Skip product id (32 bytes) as unused
+        let mut attestation_index = index + 32;
+
+        let (_, slice) = encoded_payload.split_at(attestation_index);
+        let (price_feed_id, _) = slice.split_at(32);
+        let price_feed_id: PriceFeedId = price_feed_id.into();
+        attestation_index += 32;
+
+        let mut price = u64::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+            encoded_payload.get(attestation_index + 4).unwrap(),
+            encoded_payload.get(attestation_index + 5).unwrap(),
+            encoded_payload.get(attestation_index + 6).unwrap(),
+            encoded_payload.get(attestation_index + 7).unwrap(),
+        ]);
+        attestation_index += 8;
+
+        let mut confidence = u64::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+            encoded_payload.get(attestation_index + 4).unwrap(),
+            encoded_payload.get(attestation_index + 5).unwrap(),
+            encoded_payload.get(attestation_index + 6).unwrap(),
+            encoded_payload.get(attestation_index + 7).unwrap(),
+        ]);
+        attestation_index += 8;
+
+        // exponent is an i32, expected to be in the range -255 to 0
+        let exponent = u32::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+        ]);
+        let exponent = absolute_of_exponent(exponent);
+        require(exponent < 256u32, PythError::InvalidExponent);
+        attestation_index += 4;
+
+        let ema_price = u64::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+            encoded_payload.get(attestation_index + 4).unwrap(),
+            encoded_payload.get(attestation_index + 5).unwrap(),
+            encoded_payload.get(attestation_index + 6).unwrap(),
+            encoded_payload.get(attestation_index + 7).unwrap(),
+        ]);
+        attestation_index += 8;
+
+        let ema_confidence = u64::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+            encoded_payload.get(attestation_index + 4).unwrap(),
+            encoded_payload.get(attestation_index + 5).unwrap(),
+            encoded_payload.get(attestation_index + 6).unwrap(),
+            encoded_payload.get(attestation_index + 7).unwrap(),
+        ]);
+        attestation_index += 8;
+
+        // Status is an enum (encoded as u8) with the following values:
+        // 0 = UNKNOWN: The price feed is not currently updating for an unknown reason.
+        // 1 = TRADING: The price feed is updating as expected.
+        // 2 = HALTED: The price feed is not currently updating because trading in the product has been halted.
+        // 3 = AUCTION: The price feed is not currently updating because an auction is setting the price.
+        let status = encoded_payload.get(attestation_index).unwrap();
+        // Skip the status byte, plus the unused num_publishers (8 bytes) and attestation_time (8 bytes)
+        attestation_index += 17;
+
+        let mut publish_time = u64::from_be_bytes([
+            encoded_payload.get(attestation_index).unwrap(),
+            encoded_payload.get(attestation_index + 1).unwrap(),
+            encoded_payload.get(attestation_index + 2).unwrap(),
+            encoded_payload.get(attestation_index + 3).unwrap(),
+            encoded_payload.get(attestation_index + 4).unwrap(),
+            encoded_payload.get(attestation_index + 5).unwrap(),
+            encoded_payload.get(attestation_index + 6).unwrap(),
+            encoded_payload.get(attestation_index + 7).unwrap(),
+        ]);
+        attestation_index += 8;
+
+        if status == 1u8 {
+            attestation_index += 24;
+        } else {
+            // If status is not TRADING then the latest available price is
+            // the previous price, which is parsed here.
+
+            // previous publish time
+            publish_time = u64::from_be_bytes([
+                encoded_payload.get(attestation_index).unwrap(),
+                encoded_payload.get(attestation_index + 1).unwrap(),
+                encoded_payload.get(attestation_index + 2).unwrap(),
+                encoded_payload.get(attestation_index + 3).unwrap(),
+                encoded_payload.get(attestation_index + 4).unwrap(),
+                encoded_payload.get(attestation_index + 5).unwrap(),
+                encoded_payload.get(attestation_index + 6).unwrap(),
+                encoded_payload.get(attestation_index + 7).unwrap(),
+            ]);
+            attestation_index += 8;
+
+            // previous price
+            price = u64::from_be_bytes([
+                encoded_payload.get(attestation_index).unwrap(),
+                encoded_payload.get(attestation_index + 1).unwrap(),
+                encoded_payload.get(attestation_index + 2).unwrap(),
+                encoded_payload.get(attestation_index + 3).unwrap(),
+                encoded_payload.get(attestation_index + 4).unwrap(),
+                encoded_payload.get(attestation_index + 5).unwrap(),
+                encoded_payload.get(attestation_index + 6).unwrap(),
+                encoded_payload.get(attestation_index + 7).unwrap(),
+            ]);
+            attestation_index += 8;
+
+            // previous confidence
+            confidence = u64::from_be_bytes([
+                encoded_payload.get(attestation_index).unwrap(),
+                encoded_payload.get(attestation_index + 1).unwrap(),
+                encoded_payload.get(attestation_index + 2).unwrap(),
+                encoded_payload.get(attestation_index + 3).unwrap(),
+                encoded_payload.get(attestation_index + 4).unwrap(),
+                encoded_payload.get(attestation_index + 5).unwrap(),
+                encoded_payload.get(attestation_index + 6).unwrap(),
+                encoded_payload.get(attestation_index + 7).unwrap(),
+            ]);
+            attestation_index += 8;
+        }
+
+        require(
+            (attestation_index - index) <= attestation_size.as_u64(),
+            PythError::InvalidAttestationSize,
+        );
+
+        // convert publish_time from UNIX to TAI64
+        publish_time += TAI64_DIFFERENCE;
+
+        PriceFeed::new(
+            Price::new(ema_confidence, exponent, ema_price, publish_time),
+            price_feed_id,
+            Price::new(confidence, exponent, price, publish_time),
+        )
+    }
+}
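Editor's note: the status handling above amounts to "if the feed is not TRADING, fall back to the previous aggregate". A minimal TypeScript restatement (types are illustrative):

    type Aggregate = { price: bigint; confidence: bigint; publishTime: bigint };
    function latestUsable(status: number, current: Aggregate, previous: Aggregate): Aggregate {
      return status === 1 /* TRADING */ ? current : previous;
    }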
+
+impl PriceFeed {
+    pub fn extract_from_merkle_proof(digest: Bytes, encoded_proof: Bytes, offset: u64) -> (u64, self) {
+        // In order to avoid a `ref mut` param related MemoryWriteOverlap error
+        let mut current_offset = offset;
+
+        let message_size = u16::from_be_bytes([
+            encoded_proof.get(current_offset).unwrap(),
+            encoded_proof.get(current_offset + 1).unwrap(),
+        ]).as_u64();
+        current_offset += 2;
+
+        let (_, slice) = encoded_proof.split_at(current_offset);
+        let (encoded_message, _) = slice.split_at(message_size);
+        current_offset += message_size;
+
+        let end_offset = validate_proof(encoded_proof, current_offset, digest, encoded_message.clone());
+
+        // A message type of 0 is a price feed
+        require(encoded_message.get(0).unwrap() == 0, PythError::IncorrectMessageType);
+
+        let price_feed = PriceFeed::parse_message(encoded_message);
+
+        (end_offset, price_feed)
+    }
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/update_type.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/update_type.sw
new file mode 100644
index 00000000000..bf3706275c5
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/update_type.sw
@@ -0,0 +1,38 @@
+library;
+
+use std::{array_conversions::u32::*, bytes::Bytes};
+
+use ::data_structures::{
+    accumulator_update::AccumulatorUpdate,
+    batch_attestation_update::BatchAttestationUpdate,
+};
+
+const ACCUMULATOR_MAGIC: u32 = 0x504e4155;
+
+pub enum UpdateType {
+    Accumulator: AccumulatorUpdate,
+    BatchAttestation: BatchAttestationUpdate,
+}
+
+impl UpdateType {
+    pub fn determine_type(data: Bytes) -> Self {
+        // TODO: Convert to u32 for comparison with const ACCUMULATOR_MAGIC.
+        // Use raw_ptr.read::<u32>()? Remove accumulator_magic_bytes()
+        let (magic, _) = data.split_at(4);
+        if data.len() > 4 && magic == accumulator_magic_bytes() {
+            UpdateType::Accumulator(AccumulatorUpdate::new(data))
+        } else {
+            UpdateType::BatchAttestation(BatchAttestationUpdate::new(data))
+        }
+    }
+}
+
+pub fn accumulator_magic_bytes() -> Bytes {
+    let accumulator_magic_array = ACCUMULATOR_MAGIC.to_be_bytes();
+
+    let mut accumulator_magic_bytes = Bytes::with_capacity(4);
+    accumulator_magic_bytes.push(accumulator_magic_array[0]);
+    accumulator_magic_bytes.push(accumulator_magic_array[1]);
+    accumulator_magic_bytes.push(accumulator_magic_array[2]);
+    accumulator_magic_bytes.push(accumulator_magic_array[3]);
+
+    accumulator_magic_bytes
+}
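Editor's note: the accumulator magic `0x504e4155` is ASCII "PNAU"; routing an update is just a check on the first four big-endian bytes. In TypeScript (illustrative helper, not part of this PR):

    function isAccumulatorUpdate(data: Uint8Array): boolean {
      if (data.length <= 4) return false;
      const view = new DataView(data.buffer, data.byteOffset, data.byteLength);
      return view.getUint32(0) === 0x504e4155; // "PNAU"
    }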
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/wormhole_light.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/wormhole_light.sw
new file mode 100644
index 00000000000..848ff66f355
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/data_structures/wormhole_light.sw
@@ -0,0 +1,589 @@
+library;
+
+use ::data_structures::data_source::*;
+use ::errors::WormholeError;
+
+use std::{
+    array_conversions::{
+        b256::*,
+        u16::*,
+        u32::*,
+    },
+    b512::B512,
+    block::timestamp,
+    bytes::Bytes,
+    constants::ZERO_B256,
+    hash::{
+        Hash,
+        keccak256,
+        sha256,
+    },
+    storage::storage_vec::*,
+    vm::evm::ecr::ec_recover_evm_address,
+};
+
+pub const UPGRADE_MODULE: b256 = 0x00000000000000000000000000000000000000000000000000000000436f7265;
+
+pub struct GuardianSet {
+    pub expiration_time: u64,
+    pub keys: Vec<b256>,
+}
+
+impl GuardianSet {
+    #[storage(read)]
+    pub fn from_stored(stored: StorageGuardianSet) -> Self {
+        Self {
+            expiration_time: stored.expiration_time,
+            keys: stored.keys.load_vec(),
+        }
+    }
+}
+
+pub struct StorageGuardianSet {
+    pub expiration_time: u64,
+    pub keys: StorageKey<StorageVec<b256>>,
+}
+
+impl StorageGuardianSet {
+    pub fn new(expiration_time: u64, keys: StorageKey<StorageVec<b256>>) -> Self {
+        StorageGuardianSet {
+            expiration_time,
+            keys,
+        }
+    }
+}
+
+pub struct GuardianSetUpgrade {
+    pub action: u8,
+    pub chain: u16,
+    pub module: b256,
+    pub new_guardian_set: StorageGuardianSet,
+    pub new_guardian_set_index: u32,
+}
+
+impl GuardianSetUpgrade {
+    pub fn new(
+        action: u8,
+        chain: u16,
+        module: b256,
+        new_guardian_set: StorageGuardianSet,
+        new_guardian_set_index: u32,
+    ) -> Self {
+        GuardianSetUpgrade {
+            action,
+            chain,
+            module,
+            new_guardian_set,
+            new_guardian_set_index,
+        }
+    }
+}
+
+impl GuardianSetUpgrade {
+    #[storage(read, write)]
+    pub fn parse_encoded_upgrade(current_guardian_set_index: u32, encoded_upgrade: Bytes) -> Self {
+        let mut index = 0;
+        let (_, slice) = encoded_upgrade.split_at(index);
+        let (module, _) = slice.split_at(32);
+        let module: b256 = module.into();
+        require(module == UPGRADE_MODULE, WormholeError::InvalidModule);
+        index += 32;
+
+        let action = encoded_upgrade.get(index).unwrap();
+        require(action == 2, WormholeError::InvalidGovernanceAction);
+        index += 1;
+
+        let chain = u16::from_be_bytes([encoded_upgrade.get(index).unwrap(), encoded_upgrade.get(index + 1).unwrap()]);
+        index += 2;
+
+        let new_guardian_set_index = u32::from_be_bytes([
+            encoded_upgrade.get(index).unwrap(),
+            encoded_upgrade.get(index + 1).unwrap(),
+            encoded_upgrade.get(index + 2).unwrap(),
+            encoded_upgrade.get(index + 3).unwrap(),
+        ]);
+        require(
+            new_guardian_set_index > current_guardian_set_index,
+            WormholeError::NewGuardianSetIndexIsInvalid,
+        );
+        index += 4;
+
+        let guardian_length = encoded_upgrade.get(index).unwrap();
+        index += 1;
+
+        let mut new_guardian_set: StorageGuardianSet = StorageGuardianSet::new(
+            0,
+            StorageKey::<StorageVec<b256>>::new(
+                ZERO_B256,
+                0,
+                sha256(("guardian_set_keys", new_guardian_set_index)),
+            ),
+        );
+        let mut i: u8 = 0;
+        while i < guardian_length {
+            let (_, slice) = encoded_upgrade.split_at(index);
+            let (key, _) = slice.split_at(20);
+            let key: b256 = key.into();
+            // Right-align the 20-byte EVM address within the b256
+            new_guardian_set.keys.push(key.rsh(96));
+            index += 20;
+            i += 1;
+        }
+
+        require(
+            new_guardian_set.keys.len() == guardian_length.as_u64(),
+            WormholeError::GuardianSetKeysLengthNotEqual,
+        );
+        require(encoded_upgrade.len() == index, WormholeError::InvalidGuardianSetUpgradeLength);
+
+        GuardianSetUpgrade::new(
+            action,
+            chain,
+            module,
+            new_guardian_set,
+            new_guardian_set_index,
+        )
+    }
+}
+
+pub struct GuardianSignature {
+    guardian_index: u8,
+    r: b256,
+    s: b256,
+    v: u8,
+}
+
+impl GuardianSignature {
+    pub fn new(guardian_index: u8, r: b256, s: b256, v: u8) -> Self {
+        GuardianSignature {
+            guardian_index,
+            r,
+            s,
+            v,
+        }
+    }
+
+    // eip-2098: Compact Signature Representation
+    pub fn compact(self) -> B512 {
+        let y_parity = b256::from_be_bytes([
+            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
+            0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, self.v - 27u8,
+        ]);
+        let shifted_y_parity = y_parity.lsh(255);
+        let y_parity_and_s = b256::binary_or(shifted_y_parity, self.s);
+
+        B512::from((self.r, y_parity_and_s))
+    }
+}
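Editor's note: EIP-2098 packs the signature's parity bit (`v - 27`) into the top bit of `s`, so only `(r, yParityAndS)` needs to be passed to the recovery opcode. The same packing in TypeScript (illustrative):

    function toCompact(r: bigint, s: bigint, v: number): { r: bigint; yParityAndS: bigint } {
      const yParity = BigInt(v - 27) << 255n; // parity bit into the top bit
      return { r, yParityAndS: yParity | s };
    }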
+
+impl GuardianSignature {
+    pub fn verify(
+        self,
+        guardian_set_key: b256,
+        hash: b256,
+        index: u64,
+        last_index: u64,
+    ) {
+        // Ensure that provided signature indices are ascending only
+        if index > 0 {
+            require(
+                self.guardian_index.as_u64() > last_index,
+                WormholeError::SignatureIndicesNotAscending,
+            );
+        }
+
+        let recovered_signer = ec_recover_evm_address(self.compact(), hash);
+        require(
+            recovered_signer.is_ok() && recovered_signer.unwrap().bits() == guardian_set_key,
+            WormholeError::SignatureInvalid,
+        );
+    }
+}
+
+pub struct WormholeVM {
+    pub version: u8,
+    pub guardian_set_index: u32,
+    pub governance_action_hash: b256,
+    // signatures: Vec<GuardianSignature>, // Shown here to represent the data layout of a VM, but not needed
+    pub timestamp: u32,
+    pub nonce: u32,
+    pub emitter_chain_id: u16,
+    pub emitter_address: b256,
+    pub sequence: u64,
+    pub consistency_level: u8,
+    pub payload: Bytes,
+}
+
+impl WormholeVM {
+    pub fn default() -> Self {
+        WormholeVM {
+            version: 0u8,
+            guardian_set_index: 0u32,
+            governance_action_hash: ZERO_B256,
+            timestamp: 0u32,
+            nonce: 0u32,
+            emitter_chain_id: 0u16,
+            emitter_address: ZERO_B256,
+            sequence: 0u64,
+            consistency_level: 0u8,
+            payload: Bytes::new(),
+        }
+    }
+
+    pub fn new(
+        version: u8,
+        guardian_set_index: u32,
+        governance_action_hash: b256,
+        timestamp_: u32,
+        nonce: u32,
+        emitter_chain_id: u16,
+        emitter_address: b256,
+        sequence: u64,
+        consistency_level: u8,
+        payload: Bytes,
+    ) -> Self {
+        WormholeVM {
+            version,
+            guardian_set_index,
+            governance_action_hash,
+            timestamp: timestamp_,
+            nonce,
+            emitter_chain_id,
+            emitter_address,
+            sequence,
+            consistency_level,
+            payload,
+        }
+    }
+}
+
+impl WormholeVM {
+    #[storage(read)]
+    pub fn parse_and_verify_wormhole_vm(
+        current_guardian_set_index: u32,
+        encoded_vm: Bytes,
+        wormhole_guardian_sets: StorageKey<StorageMap<u32, StorageGuardianSet>>,
+    ) -> Self {
+        let mut index = 0;
+
+        let version = encoded_vm.get(index);
+        require(version.is_some() && version.unwrap() == 1, WormholeError::VMVersionIncompatible);
+        index += 1;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4); // replace with slice()
+        let guardian_set_index = u32::from_be_bytes([
+            // replace with func
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let guardian_set = wormhole_guardian_sets.get(guardian_set_index).try_read();
+        require(guardian_set.is_some(), WormholeError::GuardianSetNotFound);
+        let guardian_set = guardian_set.unwrap();
+        require(guardian_set.keys.len() > 0, WormholeError::InvalidGuardianSetKeysLength);
+        require(
+            guardian_set_index == current_guardian_set_index && (guardian_set.expiration_time == 0 || guardian_set.expiration_time > timestamp()),
+            WormholeError::InvalidGuardianSet,
+        );
+
+        let signers_length = encoded_vm.get(index);
+        require(signers_length.is_some(), WormholeError::SignersLengthIrretrievable);
+        let signers_length = signers_length.unwrap().as_u64();
+        index += 1;
+
+        // 66 is the length of each guardian signature:
+        // 1 (guardian_index) + 32 (r) + 32 (s) + 1 (v)
+        let hash_index = index + (signers_length * 66);
+        require(hash_index < encoded_vm.len(), WormholeError::InvalidSignatureLength);
+
+        let (_, slice) = encoded_vm.split_at(hash_index);
+        let hash = keccak256(keccak256(slice));
+
+        let mut last_index = 0;
+        let mut i = 0;
+        while i < signers_length {
+            let guardian_index = encoded_vm.get(index);
+            require(guardian_index.is_some(), WormholeError::GuardianIndexIrretrievable);
+            let guardian_index = guardian_index.unwrap();
+            index += 1;
+
+            let (_, slice) = encoded_vm.split_at(index);
+            let (slice, remainder) = slice.split_at(32);
+            let r: b256 = slice.into();
+            index += 32;
+
+            let (slice, remainder) = remainder.split_at(32);
+            let s: b256 = slice.into();
+            index += 32;
+
+            let v = remainder.get(0);
+            require(v.is_some(), WormholeError::SignatureVIrretrievable);
+            let v = v.unwrap() + 27;
+            index += 1;
+
+            let guardian_set_key = guardian_set.keys.get(guardian_index.as_u64());
+            require(guardian_set_key.is_some(), WormholeError::GuardianSetKeyIrretrievable);
+
+            GuardianSignature::new(guardian_index, r, s, v)
+                .verify(guardian_set_key.unwrap().read(), hash, i, last_index);
+
+            last_index = guardian_index.as_u64();
+            i += 1;
+        }
+
+        /*
+            We're using a fixed-point number transformation with 1 decimal to deal with rounding.
+            This quorum check is critical to assessing whether we have enough guardian signatures to validate a VM.
+            If the guardian set key length is 0 and the signatures length is 0, this could compromise the integrity of both VM and signature verification.
+        */
+        require(
+            ((((guardian_set.keys.len() * 10) / 3) * 2) / 10 + 1) <= signers_length,
+            WormholeError::NoQuorum,
+        );
+
+        // ignore VM.signatures
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4);
+        let _timestamp = u32::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4);
+        let nonce = u32::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(2);
+        let emitter_chain_id = u16::from_be_bytes([slice.get(0).unwrap(), slice.get(1).unwrap()]);
+        index += 2;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(32);
+        let emitter_address: b256 = slice.into();
+        index += 32;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(8);
+        let sequence = u64::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+            slice.get(4).unwrap(),
+            slice.get(5).unwrap(),
+            slice.get(6).unwrap(),
+            slice.get(7).unwrap(),
+        ]);
+        index += 8;
+
+        let consistency_level = encoded_vm.get(index);
+        require(consistency_level.is_some(), WormholeError::ConsistencyLevelIrretrievable);
+        index += 1;
+
+        require(index <= encoded_vm.len(), WormholeError::InvalidPayloadLength);
+        let (_, payload) = encoded_vm.split_at(index);
+
+        WormholeVM::new(
+            version.unwrap(),
+            guardian_set_index,
+            hash,
+            _timestamp,
+            nonce,
+            emitter_chain_id,
+            emitter_address,
+            sequence,
+            consistency_level.unwrap(),
+            payload,
+        )
+    }
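Editor's note: the fixed-point quorum rule above requires more than two-thirds of the guardian set to have signed. Restated in TypeScript:

    // ((n * 10 / 3) * 2) / 10 + 1, with integer division at each step
    function quorum(guardianCount: number): number {
      return Math.floor((Math.floor((guardianCount * 10) / 3) * 2) / 10) + 1;
    }
    // quorum(19) === 13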
+
+    pub fn parse_initial_wormhole_vm(encoded_vm: Bytes) -> Self {
+        let mut index = 0;
+
+        let version = encoded_vm.get(index);
+        require(version.is_some() && version.unwrap() == 1, WormholeError::VMVersionIncompatible);
+        index += 1;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4); // replace with slice()
+        let guardian_set_index = u32::from_be_bytes([
+            // replace with func
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let signers_length = encoded_vm.get(index);
+        require(signers_length.is_some(), WormholeError::SignersLengthIrretrievable);
+        let signers_length = signers_length.unwrap().as_u64();
+        index += 1;
+
+        // 66 is the length of each guardian signature:
+        // 1 (guardian_index) + 32 (r) + 32 (s) + 1 (v)
+        let hash_index = index + (signers_length * 66);
+        require(hash_index < encoded_vm.len(), WormholeError::InvalidSignatureLength);
+
+        let (_, slice) = encoded_vm.split_at(hash_index);
+        let hash = keccak256(keccak256(slice));
+
+        // account for signatures
+        index += 66 * signers_length;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4);
+        let timestamp_ = u32::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(4);
+        let nonce = u32::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+        ]);
+        index += 4;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(2);
+        let emitter_chain_id = u16::from_be_bytes([slice.get(0).unwrap(), slice.get(1).unwrap()]);
+        index += 2;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(32);
+        let emitter_address: b256 = slice.into();
+        index += 32;
+
+        let (_, slice) = encoded_vm.split_at(index);
+        let (slice, _) = slice.split_at(8);
+        let sequence = u64::from_be_bytes([
+            slice.get(0).unwrap(),
+            slice.get(1).unwrap(),
+            slice.get(2).unwrap(),
+            slice.get(3).unwrap(),
+            slice.get(4).unwrap(),
+            slice.get(5).unwrap(),
+            slice.get(6).unwrap(),
+            slice.get(7).unwrap(),
+        ]);
+        index += 8;
+
+        let consistency_level = encoded_vm.get(index);
+        require(consistency_level.is_some(), WormholeError::ConsistencyLevelIrretrievable);
+        index += 1;
+
+        require(index <= encoded_vm.len(), WormholeError::InvalidPayloadLength);
+        let (_, payload) = encoded_vm.split_at(index);
+
+        WormholeVM::new(
+            version.unwrap(),
+            guardian_set_index,
+            hash,
+            timestamp_,
+            nonce,
+            emitter_chain_id,
+            emitter_address,
+            sequence,
+            consistency_level.unwrap(),
+            payload,
+        )
+    }
+}
+
+impl WormholeVM {
+    #[storage(read)]
+    pub fn parse_and_verify_pyth_vm(
+        current_guardian_set_index: u32,
+        encoded_vm: Bytes,
+        wormhole_guardian_sets: StorageKey<StorageMap<u32, StorageGuardianSet>>,
+        is_valid_data_source: StorageKey<StorageMap<DataSource, bool>>,
+    ) -> Self {
+        let vm = WormholeVM::parse_and_verify_wormhole_vm(
+            current_guardian_set_index,
+            encoded_vm,
+            wormhole_guardian_sets,
+        );
+        require(
+            DataSource::new(vm.emitter_chain_id, vm.emitter_address)
+                .is_valid_data_source(is_valid_data_source),
+            WormholeError::InvalidUpdateDataSource,
+        );
+        vm
+    }
+}
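Editor's note: both parsers above locate the double-keccak'd VM body by skipping a fixed header plus the packed signatures (66 bytes each). The offset arithmetic in TypeScript (illustrative):

    function vmBodyOffset(signersLength: number): number {
      const VERSION = 1;            // u8
      const GUARDIAN_SET_INDEX = 4; // u32
      const SIGNERS_LENGTH = 1;     // u8
      return VERSION + GUARDIAN_SET_INDEX + SIGNERS_LENGTH + signersLength * 66;
    }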
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/errors.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/errors.sw
new file mode 100644
index 00000000000..ab74947ae83
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/errors.sw
@@ -0,0 +1,72 @@
+library;
+
+pub enum PythError {
+    FeesCanOnlyBePaidInTheBaseAsset: (),
+    FuturePriceNotAllowed: (),
+    GuardianSetNotFound: (),
+    IncorrectMessageType: (),
+    InsufficientFee: (),
+    InvalidArgument: (),
+    InvalidAttestationSize: (),
+    InvalidDataSourcesLength: (),
+    InvalidExponent: (),
+    InvalidGovernanceDataSource: (),
+    InvalidGovernanceAction: (),
+    InvalidGovernanceMessage: (),
+    InvalidGovernanceModule: (),
+    InvalidGovernanceTarget: (),
+    InvalidHeaderSize: (),
+    InvalidMagic: (),
+    InvalidMajorVersion: (),
+    InvalidMinorVersion: (),
+    InvalidPayloadId: (),
+    InvalidPayloadLength: (),
+    InvalidPriceFeedDataLength: (),
+    InvalidProof: (),
+    InvalidUpdateData: (),
+    InvalidUpdateDataLength: (),
+    InvalidUpdateDataSource: (),
+    InvalidUpgradeModule: (),
+    InvalidWormholeAddressToSet: (),
+    LengthOfPriceFeedIdsAndPublishTimesMustMatch: (),
+    NewGuardianSetIsEmpty: (),
+    NumberOfUpdatesIrretrievable: (),
+    OldGovernanceMessage: (),
+    /// Emitted when a Price's `publish_time` is stale.
+    OutdatedPrice: (),
+    /// Emitted when a PriceFeed could not be retrieved.
+    PriceFeedNotFound: (),
+    PriceFeedNotFoundWithinRange: (),
+    WormholeGovernanceActionNotFound: (),
+}
+
+pub enum WormholeError {
+    ConsistencyLevelIrretrievable: (),
+    GovernanceActionAlreadyConsumed: (),
+    GuardianIndexIrretrievable: (),
+    GuardianSetHasExpired: (),
+    GuardianSetKeyIrretrievable: (),
+    GuardianSetKeysLengthNotEqual: (),
+    GuardianSetNotFound: (),
+    InvalidGovernanceAction: (),
+    InvalidGovernanceChain: (),
+    InvalidGovernanceContract: (),
+    InvalidGuardianSet: (),
+    InvalidGuardianSetKeysLength: (),
+    InvalidGuardianSetUpgrade: (),
+    InvalidGuardianSetUpgradeLength: (),
+    InvalidModule: (),
+    InvalidPayloadLength: (),
+    InvalidSignatureLength: (),
+    InvalidUpdateDataSource: (),
+    NewGuardianSetIsEmpty: (),
+    NewGuardianSetIndexIsInvalid: (),
+    NoQuorum: (),
+    NotSignedByCurrentGuardianSet: (),
+    SignatureInvalid: (),
+    SignatureIndicesNotAscending: (),
+    SignatureVIrretrievable: (),
+    SignersLengthIrretrievable: (),
+    VMSignatureInvalid: (),
+    VMVersionIncompatible: (),
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/events.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/events.sw
new file mode 100644
index 00000000000..4ccfecf2e4e
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/events.sw
@@ -0,0 +1,43 @@
+library;
+
+use ::data_structures::{data_source::DataSource, price::PriceFeedId};
+
+pub struct ConstructedEvent {
+    pub guardian_set_index: u32,
+}
+
+pub struct NewGuardianSetEvent {
+    pub governance_action_hash: b256,
+    // new_guardian_set: GuardianSet, // TODO: Uncomment when the SDK supports logs with nested Vecs https://github.com/FuelLabs/fuels-rs/issues/1046
+    pub new_guardian_set_index: u32,
+}
+
+pub struct UpdatedPriceFeedsEvent {
+    pub updated_price_feeds: Vec<PriceFeedId>,
+}
+
+pub struct ContractUpgradedEvent {
+    pub old_implementation: Identity,
+    pub new_implementation: Identity,
+}
+
+pub struct GovernanceDataSourceSetEvent {
+    pub old_data_source: DataSource,
+    pub new_data_source: DataSource,
+    pub initial_sequence: u64,
+}
+
+pub struct DataSourcesSetEvent {
+    pub old_data_sources: Vec<DataSource>,
+    pub new_data_sources: Vec<DataSource>,
+}
+
+pub struct FeeSetEvent {
+    pub old_fee: u64,
+    pub new_fee: u64,
+}
+
+pub struct ValidPeriodSetEvent {
+    pub old_valid_period: u64,
+    pub new_valid_period: u64,
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/interface.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/interface.sw
new file mode 100644
index 00000000000..43ac8c334f6
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/interface.sw
@@ -0,0 +1,323 @@
+library;
+
+// The order of the modules is important because of the dependencies between them.
+pub mod pyth_merkle_proof;
+pub mod errors;
+pub mod utils;
+pub mod events;
+pub mod data_structures;
+
+use ::data_structures::{
+    data_source::DataSource,
+    governance_payload::UpgradeContractPayload,
+    price::{
+        Price,
+        PriceFeed,
+        PriceFeedId,
+    },
+    wormhole_light::GuardianSet,
+};
+use std::{bytes::Bytes, storage::storage_vec::*};
+
+abi PythCore {
+    /// This function returns the exponentially-weighted moving average price and confidence interval.
+    ///
+    /// # Arguments
+    ///
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the EMA price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    ///
+    /// # Reverts
+    ///
+    /// * When the EMA price is not available.
+    #[storage(read)]
+    fn ema_price(price_feed_id: PriceFeedId) -> Price;
+
+    /// This function returns the exponentially-weighted moving average price that is no older than `time_period` seconds
+    /// from the current time.
+    ///
+    /// # Additional Information
+    ///
+    /// This function is a sanity-checked version of `ema_price_unsafe` which is useful in
+    /// applications that require a sufficiently-recent price.
+    ///
+    /// # Arguments
+    ///
+    /// * `time_period`: [u64] - The period (in seconds) that a price feed is considered valid since its publish time.
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the EMA price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    ///
+    /// # Reverts
+    ///
+    /// * When the EMA price is not available.
+    /// * When the EMA price wasn't updated recently enough.
+    #[storage(read)]
+    fn ema_price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price;
+
+    /// This function returns the exponentially-weighted moving average price of a price feed without any sanity checks.
+    ///
+    /// # Additional Information
+    ///
+    /// This function returns the same price as `ema_price` in the case where the price is available.
+    /// However, if the price is not recent this function returns the latest available price.
+    ///
+    /// The returned price can be from arbitrarily far in the past; this function makes no guarantees that
+    /// the returned price is recent or useful for any particular application.
+    ///
+    /// Users of this function should check the `publish_time` in the `Price` to ensure that the returned price is
+    /// sufficiently recent for their application. If you are considering using this function, it may be
+    /// safer / easier to use either `ema_price` or `ema_price_no_older_than`.
+    ///
+    /// # Arguments
+    ///
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the EMA price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    #[storage(read)]
+    fn ema_price_unsafe(price_feed_id: PriceFeedId) -> Price;
+
+    /// This function parses `update_data` and returns price feeds of the given `price_feed_ids` if they are all published
+    /// within `min_publish_time` and `max_publish_time`.
+    ///
+    /// # Additional Information
+    ///
+    /// You can use this method if you want to use a Pyth price at a fixed time and not the most recent price;
+    /// otherwise, please consider using `update_price_feeds`. This method does not store the price updates on-chain.
+    ///
+    /// This method requires the caller to pay a fee in wei; the required fee can be computed by calling
+    /// `update_fee`.
+    ///
+    /// # Arguments
+    ///
+    /// * `max_publish_time`: [u64] - The maximum acceptable `publish_time` for the given `price_feed_ids`.
+    /// * `min_publish_time`: [u64] - The minimum acceptable `publish_time` for the given `price_feed_ids`.
+    /// * `price_feed_ids`: [Vec<PriceFeedId>] - The ids of the price feeds to return PriceFeed data for.
+    /// * `update_data`: [Vec<Bytes>] - The price update data.
+    ///
+    /// # Returns
+    ///
+    /// * [Vec<PriceFeed>] - The price feeds corresponding to the given `price_feed_ids`.
+    ///
+    /// # Reverts
+    ///
+    /// * When the transferred fee is not sufficient.
+    /// * When the `update_data` is invalid.
+    /// * When there is no update for any of the given `price_feed_ids` within the given time range.
+    #[storage(read), payable]
+    fn parse_price_feed_updates(
+        max_publish_time: u64,
+        min_publish_time: u64,
+        price_feed_ids: Vec<PriceFeedId>,
+        update_data: Vec<Bytes>,
+    ) -> Vec<PriceFeed>;
+
+    /// This function returns the price and confidence interval.
+    ///
+    /// # Additional Information
+    ///
+    /// The returned price is sanity-checked for staleness: it must have been updated within the last valid time period.
+    ///
+    /// # Arguments
+    ///
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    ///
+    /// # Reverts
+    ///
+    /// * When the price has not been updated within the last valid time period.
+    #[storage(read)]
+    fn price(price_feed_id: PriceFeedId) -> Price;
+
+    /// This function returns the price that is no older than `time_period` seconds of the current time.
+    ///
+    /// # Additional Information
+    ///
+    /// This function is a sanity-checked version of `price_unsafe` which is useful in applications that require a
+    /// sufficiently-recent price. Reverts if the price wasn't updated sufficiently recently.
+    ///
+    /// # Arguments
+    ///
+    /// * `time_period`: [u64] - The period (in seconds) that a price feed is considered valid since its publish time.
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    ///
+    /// # Reverts
+    ///
+    /// * When the price is not available.
+    /// * When the price wasn't updated recently enough.
+    #[storage(read)]
+    fn price_no_older_than(time_period: u64, price_feed_id: PriceFeedId) -> Price;
+
+    /// This function returns the price of a price feed without any sanity checks.
+    ///
+    /// # Additional Information
+    ///
+    /// This function returns the most recent price update in this contract without any recency checks.
+    /// This function is unsafe as the returned price update may be arbitrarily far in the past.
+    ///
+    /// Users of this function should check the `publish_time` in the price to ensure that the returned price is
+    /// sufficiently recent for their application. If you are considering using this function, it may be
+    /// safer / easier to use either `price` or `price_no_older_than`.
+    ///
+    /// # Arguments
+    ///
+    /// * `price_feed_id`: [PriceFeedId] - The Pyth Price Feed ID of which to fetch the price and confidence interval.
+    ///
+    /// # Returns
+    ///
+    /// * [Price] - Please read the documentation of data_structures::price to understand how to use this safely.
+    #[storage(read)]
+    fn price_unsafe(price_feed_id: PriceFeedId) -> Price;
+
+    /// This function returns the required fee in wei to update an array of price updates.
+    ///
+    /// # Arguments
+    ///
+    /// * `update_data`: [Vec<Bytes>] - The price update data.
+    ///
+    /// # Returns
+    ///
+    /// * [u64] - The required fee in wei.
+    #[storage(read)]
+    fn update_fee(update_data: Vec<Bytes>) -> u64;
+
+    /// This function updates price feeds with the given update messages.
+    ///
+    /// # Additional Information
+    ///
+    /// This function requires the caller to pay a fee in wei; the required fee can be computed by calling
+    /// `update_fee`.
+    /// Prices will be updated if they are more recent than the current stored prices.
+    /// The call will succeed even if the update is not the most recent.
+    ///
+    /// # Arguments
+    ///
+    /// * `update_data`: [Vec<Bytes>] - The price update data.
+    ///
+    /// # Reverts
+    ///
+    /// * When the transferred fee is not sufficient.
+    /// * When the `update_data` is invalid.
+    #[storage(read, write), payable]
+    fn update_price_feeds(update_data: Vec<Bytes>);
+
+    /// This function is a wrapper around `update_price_feeds` that reverts fast if a price update is not necessary.
+    ///
+    /// # Additional Information
+    ///
+    /// A price update is necessary if the current on-chain `publish_time` is older than the given `publish_time`. It relies solely on the
+    /// given `publish_time` for the price feeds and does not read the actual price update publish time within `update_data`.
+    ///
+    /// This method requires the caller to pay a fee in wei; the required fee can be computed by calling
+    /// `update_fee`.
+    ///
+    /// `price_feed_ids` and `publish_times` are two arrays of the same size that correspond to the sender's known `publish_time`
+    /// of each PriceFeedId when calling this method. If all price feeds within `price_feed_ids` have been updated with
+    /// a publish time newer than or equal to the given one, the call reverts to save gas.
+    /// Otherwise, it calls `update_price_feeds` to update the prices.
+    ///
+    /// # Arguments
+    ///
+    /// * `price_feed_ids`: [Vec<PriceFeedId>] - Vector of price feed ids; `price_feed_ids[i]` corresponds to the known price feed id of `publish_times[i]`.
+    /// * `publish_times`: [Vec<u64>] - Vector of publish times; `publish_times[i]` corresponds to the known publish time of `price_feed_ids[i]`.
+    /// * `update_data`: [Vec<Bytes>] - The price update data.
+    ///
+    /// # Reverts
+    ///
+    /// * When the update is not necessary.
+    /// * When the transferred fee is not sufficient.
+    /// * When the `update_data` is invalid.
+    #[storage(read, write), payable]
+    fn update_price_feeds_if_necessary(
+        price_feed_ids: Vec<PriceFeedId>,
+        publish_times: Vec<u64>,
+        update_data: Vec<Bytes>,
+    );
+
+    /// This function returns the period (in seconds) that a price feed is considered valid since its publish time.
+    ///
+    /// # Returns
+    ///
+    /// * [u64] - The period (in seconds) that a price feed is considered valid since its publish time.
+    #[storage(read)]
+    fn valid_time_period() -> u64;
+}
+
+abi PythInit {
+    #[storage(read, write)]
+    fn constructor(
+        data_sources: Vec<DataSource>,
+        governance_data_source: DataSource,
+        wormhole_governance_data_source: DataSource,
+        single_update_fee: u64,
+        valid_time_period_seconds: u64,
+        wormhole_guardian_set_addresses: Vec<b256>,
+        wormhole_guardian_set_index: u32,
+        chain_id: u16,
+    );
+}
+
+abi PythInfo {
+    #[storage(read)]
+    fn latest_publish_time(price_feed_id: PriceFeedId) -> u64;
+
+    /// @notice Returns true if a price feed with the given id exists.
+    /// @param price_feed_id The Pyth Price Feed ID of which to check its existence.
+    #[storage(read)]
+    fn price_feed_exists(price_feed_id: PriceFeedId) -> bool;
+
+    /// @notice Returns the price feed with the given id.
+    /// @dev Reverts if the price feed does not exist.
+    /// @param price_feed_id The Pyth Price Feed ID of which to fetch the PriceFeed.
+    #[storage(read)]
+    fn price_feed_unsafe(price_feed_id: PriceFeedId) -> PriceFeed;
+
+    #[storage(read)]
+    fn single_update_fee() -> u64;
+
+    #[storage(read)]
+    fn is_valid_data_source(data_source: DataSource) -> bool;
+
+    #[storage(read)]
+    fn valid_data_sources() -> Vec<DataSource>;
+
+    #[storage(read)]
+    fn last_executed_governance_sequence() -> u64;
+
+    #[storage(read)]
+    fn chain_id() -> u16;
+}
+
+abi WormholeGuardians {
+    #[storage(read)]
+    fn current_guardian_set_index() -> u32;
+
+    #[storage(read)]
+    fn current_wormhole_provider() -> DataSource;
+
+    #[storage(read)]
+    fn governance_action_is_consumed(hash: b256) -> bool;
+
+    #[storage(read)]
+    fn guardian_set(index: u32) -> GuardianSet;
+
+    #[storage(read, write)]
+    fn submit_new_guardian_set(vm: Bytes);
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/pyth_merkle_proof.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/pyth_merkle_proof.sw
new file mode 100644
index 00000000000..a26f91fcb0c
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/pyth_merkle_proof.sw
@@ -0,0 +1,63 @@
+library;
+
+use std::{bytes::Bytes, hash::{Hash, keccak256}};
+use ::errors::PythError;
+
+pub const MERKLE_LEAF_PREFIX = 0u8;
+pub const MERKLE_NODE_PREFIX = 1u8;
+
+fn leaf_hash(data: Bytes) -> Bytes {
+    let mut bytes = Bytes::new();
+    bytes.push(MERKLE_LEAF_PREFIX);
+    bytes.append(data);
+
+    // Digests are the first 20 bytes of the keccak256 hash
+    let (slice, _) = Bytes::from(keccak256(bytes)).split_at(20);
+
+    slice
+}
+
+fn node_hash(child_a: Bytes, child_b: Bytes) -> Bytes {
+    let mut bytes = Bytes::with_capacity(41);
+    bytes.push(MERKLE_NODE_PREFIX);
+
+    // Hash the children in ascending digest order
+    let a: b256 = child_a.into();
+    let b: b256 = child_b.into();
+    if a > b {
+        bytes.append(child_b);
+        bytes.append(child_a);
+    } else {
+        bytes.append(child_a);
+        bytes.append(child_b);
+    }
+
+    let (slice, _) = Bytes::from(keccak256(bytes)).split_at(20);
+
+    slice
+}
+
+pub fn validate_proof(
+    encoded_proof: Bytes,
+    ref mut proof_offset: u64,
+    root: Bytes,
+    leaf_data: Bytes,
+) -> u64 {
+    let mut current_digest = leaf_hash(leaf_data);
+
+    let proof_size = encoded_proof.get(proof_offset).unwrap().as_u64();
+    proof_offset += 1;
+
+    let mut i = 0;
+    while i < proof_size {
+        let (_, slice) = encoded_proof.split_at(proof_offset);
+        let (sibling_digest, _) = slice.split_at(20);
+        proof_offset += 20;
+
+        current_digest = node_hash(current_digest, sibling_digest);
+        i += 1;
+    }
+
+    let current_digest_b256: b256 = current_digest.into();
+    let root_b256: b256 = root.into();
+    require(current_digest_b256 == root_b256, PythError::InvalidProof);
+
+    proof_offset
+}
diff --git a/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/utils.sw b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/utils.sw
new file mode 100644
index 00000000000..28ef0b430b2
--- /dev/null
+++ b/internal/benchmarks/test/fixtures/forc-projects/pyth-interface/src/utils.sw
@@ -0,0 +1,17 @@
+library;
+
+// Returns the magnitude of a (two's complement) negative i32 exponent that arrived in a u32.
+pub fn absolute_of_exponent(exponent: u32) -> u32 {
+    if exponent == 0u32 {
+        exponent
+    } else {
+        u32::max() - exponent + 1
+    }
+}
+
+#[storage(read)]
+pub fn total_fee(
+    total_number_of_updates: u64,
+    single_update_fee: StorageKey<u64>,
+) -> u64 {
+    total_number_of_updates * single_update_fee.read()
+}
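Editor's note: the merkle scheme in pyth_merkle_proof.sw uses 20-byte digests (truncated keccak256) with a 0x00 domain prefix for leaves and 0x01 for nodes, hashing siblings smaller-digest-first. A TypeScript sketch of the same walk; `keccak256` here is an assumed external helper (e.g. from a hashing library), not part of this PR:

    declare function keccak256(data: Uint8Array): Uint8Array;

    function hash20(prefix: number, ...parts: Uint8Array[]): Uint8Array {
      const buf = new Uint8Array(1 + parts.reduce((n, p) => n + p.length, 0));
      buf[0] = prefix; // 0x00 = leaf, 0x01 = node
      let at = 1;
      for (const p of parts) { buf.set(p, at); at += p.length; }
      return keccak256(buf).slice(0, 20);
    }

    function validateProof(proof: Uint8Array, offset: number, root: Uint8Array, leaf: Uint8Array): number {
      let digest = hash20(0x00, leaf);
      const proofSize = proof[offset++];
      for (let i = 0; i < proofSize; i++) {
        const sibling = proof.slice(offset, offset + 20);
        offset += 20;
        // node_hash orders children by digest value, smaller first
        const [a, b] = Buffer.compare(Buffer.from(digest), Buffer.from(sibling)) > 0
          ? [sibling, digest]
          : [digest, sibling];
        digest = hash20(0x01, a, b);
      }
      if (Buffer.compare(Buffer.from(digest), Buffer.from(root)) !== 0) {
        throw new Error('InvalidProof');
      }
      return offset; // end offset, as validate_proof returns in Sway
    }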