From 683a1283827ba116d594d62fc563c376394f562b Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 11 Sep 2023 17:31:28 +0700 Subject: [PATCH 001/143] test: added CI for the Polkadex network --- tests/.mocha.env | 20 +++ tests/.vscode/settings.json | 8 +- tests/src/config.ts | 1 + tests/src/util/index.ts | 4 +- tests/src/util/playgrounds/unique.dev.ts | 16 +- tests/src/util/playgrounds/unique.ts | 30 ++++ tests/src/xcm/xcmUnique.test.ts | 217 ++++++++++++++++++++++- 7 files changed, 291 insertions(+), 5 deletions(-) create mode 100644 tests/.mocha.env diff --git a/tests/.mocha.env b/tests/.mocha.env new file mode 100644 index 0000000000..f3956f59e7 --- /dev/null +++ b/tests/.mocha.env @@ -0,0 +1,20 @@ +RELAY_ACALA_ID=1002 +RELAY_ASTAR_ID=1005 +RELAY_MOONBEAM_ID=1003 +RELAY_POLKADEX_ID=1006 +RELAY_STATEMINT_ID=1004 +RELAY_UNIQUE_ID=1001 +RELAY_HTTP_URL=http://127.0.0.1:9699/relay/ +RELAY_ACALA_HTTP_URL=http://127.0.0.1:9699/relay-acala/ +RELAY_ASTAR_HTTP_URL=http://127.0.0.1:9699/relay-astar/ +RELAY_MOONBEAM_HTTP_URL=http://127.0.0.1:9699/relay-moonbeam/ +RELAY_POLKADEX_HTTP_URL=http://127.0.0.1:9699/relay-polkadex/ +RELAY_STATEMINT_HTTP_URL=http://127.0.0.1:9699/relay-statemint/ +RELAY_UNIQUE_HTTP_URL=http://127.0.0.1:9699/relay-unique/ +RELAY_URL=ws://127.0.0.1:9699/relay/ +RELAY_ACALA_URL=ws://127.0.0.1:9699/relay-acala/ +RELAY_ASTAR_URL=ws://127.0.0.1:9699/relay-astar/ +RELAY_MOONBEAM_URL=ws://127.0.0.1:9699/relay-moonbeam/ +RELAY_POLKADEX_URL=ws://127.0.0.1:9699/relay-polkadex/ +RELAY_STATEMINT_URL=ws://127.0.0.1:9699/relay-statemint/ +RELAY_UNIQUE_URL=ws://127.0.0.1:9699/relay-unique/ diff --git a/tests/.vscode/settings.json b/tests/.vscode/settings.json index b05c111ade..de3123d97b 100644 --- a/tests/.vscode/settings.json +++ b/tests/.vscode/settings.json @@ -1,6 +1,10 @@ { - "mocha.enabled": true, - "mochaExplorer.files": "**/*.test.ts", + "mochaExplorer.env": { + "RUN_GOV_TESTS": "1", + "RUN_XCM_TESTS": "1" + }, + "mochaExplorer.files": "src/**/*.test.ts", + "mochaExplorer.envPath": ".mocha.env", "mochaExplorer.require": "ts-node/register", "eslint.format.enable": true, "[javascript]": { diff --git a/tests/src/config.ts b/tests/src/config.ts index 364d036b31..4c826d0ab3 100644 --- a/tests/src/config.ts +++ b/tests/src/config.ts @@ -28,6 +28,7 @@ const config = { westmintUrl: process.env.RELAY_WESTMINT_URL || 'ws://127.0.0.1:9948', statemineUrl: process.env.RELAY_STATEMINE_URL || 'ws://127.0.0.1:9948', statemintUrl: process.env.RELAY_STATEMINT_URL || 'ws://127.0.0.1:9948', + polkadexUrl: process.env.RELAY_POLKADEX_URL || 'ws://127.0.0.1:9950', }; export default config; diff --git a/tests/src/util/index.ts b/tests/src/util/index.ts index cb49f2e96c..9b5da2ce22 100644 --- a/tests/src/util/index.ts +++ b/tests/src/util/index.ts @@ -11,7 +11,7 @@ import {Context} from 'mocha'; import config from '../config'; import {ChainHelperBase} from './playgrounds/unique'; import {ILogger} from './playgrounds/types'; -import {DevUniqueHelper, SilentLogger, SilentConsole, DevMoonbeamHelper, DevMoonriverHelper, DevAcalaHelper, DevKaruraHelper, DevRelayHelper, DevWestmintHelper, DevStatemineHelper, DevStatemintHelper, DevAstarHelper, DevShidenHelper} from './playgrounds/unique.dev'; +import {DevUniqueHelper, SilentLogger, SilentConsole, DevMoonbeamHelper, DevMoonriverHelper, DevAcalaHelper, DevKaruraHelper, DevRelayHelper, DevWestmintHelper, DevStatemineHelper, DevStatemintHelper, DevAstarHelper, DevShidenHelper, DevPolkadexHelper} from './playgrounds/unique.dev'; import {dirname} from 'path'; import 
{fileURLToPath} from 'url'; @@ -89,6 +89,8 @@ export const usingAstarPlaygrounds = (url: string, code: (helper: DevAstarHelper export const usingShidenPlaygrounds = (url: string, code: (helper: DevShidenHelper, privateKey: (seed: string) => Promise) => Promise) => usingPlaygroundsGeneral(DevShidenHelper, url, code); +export const usingPolkadexPlaygrounds = (url: string, code: (helper: DevPolkadexHelper, privateKey: (seed: string) => Promise) => Promise) => usingPlaygroundsGeneral(DevPolkadexHelper, url, code); + export const MINIMUM_DONOR_FUND = 4_000_000n; export const DONOR_FUNDING = 4_000_000n; diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index ccb8bfb5b5..7532a3defe 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -3,7 +3,7 @@ import {stringToU8a} from '@polkadot/util'; import {blake2AsHex, encodeAddress, mnemonicGenerate} from '@polkadot/util-crypto'; -import {UniqueHelper, MoonbeamHelper, ChainHelperBase, AcalaHelper, RelayHelper, WestmintHelper, AstarHelper} from './unique'; +import {UniqueHelper, MoonbeamHelper, ChainHelperBase, AcalaHelper, RelayHelper, WestmintHelper, AstarHelper, PolkadexHelper} from './unique'; import {ApiPromise, Keyring, WsProvider} from '@polkadot/api'; import * as defs from '../../interfaces/definitions'; import {IKeyringPair} from '@polkadot/types/types'; @@ -249,6 +249,10 @@ export class Event { messageHash: eventJsonData(data, 0), })); + static Success = this.Method('Success', data => ({ + messageHash: eventJsonData(data, 0), + })); + static Fail = this.Method('Fail', data => ({ messageHash: eventJsonData(data, 0), outcome: eventData(data, 1), @@ -406,6 +410,16 @@ export class DevAcalaHelper extends AcalaHelper { } } +export class DevPolkadexHelper extends PolkadexHelper { + wait: WaitGroup; + constructor(logger: { log: (msg: any, level: any) => void, level: any }, options: {[key: string]: any} = {}) { + options.helperBase = options.helperBase ?? PolkadexHelper; + + super(logger, options); + this.wait = new WaitGroup(this); + } +} + export class DevKaruraHelper extends DevAcalaHelper {} export class ArrangeGroup { diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index 247e44ff4b..d892338807 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -3694,6 +3694,12 @@ class XTokensGroup extends HelperGroup { } } +class PolkadexXcmHelperGroup extends HelperGroup { + async whitelistToken(signer: TSigner, assetId: any) { + await this.helper.executeExtrinsic(signer, 'api.tx.xcmHelper.whitelistToken', [assetId], true); + } +} + class TokensGroup extends HelperGroup { async accounts(address: string, currencyId: any) { const {free} = (await this.helper.callRpc('api.query.tokens.accounts', [address, currencyId])).toJSON() as any; @@ -3977,6 +3983,30 @@ export class AcalaHelper extends XcmChainHelper { } } +export class PolkadexHelper extends XcmChainHelper { + assets: AssetsGroup; + balance: SubstrateBalanceGroup; + xTokens: XTokensGroup; + xcm: XcmGroup; + xcmHelper: PolkadexXcmHelperGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? 
PolkadexHelper); + + this.assets = new AssetsGroup(this); + this.balance = new SubstrateBalanceGroup(this); + this.xTokens = new XTokensGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + this.xcmHelper = new PolkadexXcmHelperGroup(this); + } + + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as T; + } +} + // eslint-disable-next-line @typescript-eslint/naming-convention function ScheduledUniqueHelper(Base: T) { return class extends Base { diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 0a5f9851c4..b1050d8ef8 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -16,14 +16,16 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; -import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingRelayPlaygrounds, usingMoonbeamPlaygrounds, usingStatemintPlaygrounds, usingAstarPlaygrounds} from '../util'; +import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingRelayPlaygrounds, usingMoonbeamPlaygrounds, usingStatemintPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import {nToBigInt} from '@polkadot/util'; const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); const ACALA_CHAIN = +(process.env.RELAY_ACALA_ID || 2000); const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); +const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); const STATEMINT_PALLET_INSTANCE = 50; @@ -32,6 +34,7 @@ const statemintUrl = config.statemintUrl; const acalaUrl = config.acalaUrl; const moonbeamUrl = config.moonbeamUrl; const astarUrl = config.astarUrl; +const polkadexUrl = config.polkadexUrl; const RELAY_DECIMALS = 12; const STATEMINT_DECIMALS = 12; @@ -791,6 +794,193 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { }); }); +describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { + let alice: IKeyringPair; + let randomAccount: IKeyringPair; + let unqFees: bigint; + let balanceUniqueTokenInit: bigint; + let balanceUniqueTokenMiddle: bigint; + let balanceUniqueTokenFinal: bigint; + const maxWaitBlocks = 6; + + const uniqueAssetId = { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + [randomAccount] = await helper.arrange.createAccounts([0n], alice); + + // Set the default version to wrap the first message to other chains. 
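+      // (In this suite SAFE_XCM_VERSION is 2, so messages sent to chains whose
+      // XCM version is not yet known are wrapped as XCM v2.)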
+ await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + const isWhitelisted = ((await helper.callRpc('api.query.xcmHelper.whitelistedTokens', [])).toJSON() as []) + .map(nToBigInt).length != 0; + + if(!isWhitelisted) { + await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId); + } + + await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); + }); + + await usingPlaygrounds(async (helper) => { + await helper.balance.transferToSubstrate(alice, randomAccount.address, 10n * TRANSFER_AMOUNT); + balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); + }); + }); + + itSub('Should connect and send UNQ to Polkadex', async ({helper}) => { + + const destination = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: POLKADEX_CHAIN, + }, + }, + }, + }; + + const beneficiary = { + V2: { + parents: 0, + interior: { + X1: { + AccountId32: { + network: 'Any', + id: randomAccount.addressRaw, + }, + }, + }, + }, + }; + + const assets = { + V2: [ + { + id: { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + fun: { + Fungible: TRANSFER_AMOUNT, + }, + }, + ], + }; + + const feeAssetItem = 0; + + await helper.xcm.limitedReserveTransferAssets(randomAccount, destination, beneficiary, assets, feeAssetItem, 'Unlimited'); + const messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + balanceUniqueTokenMiddle = await helper.balance.getSubstrate(randomAccount.address); + + unqFees = balanceUniqueTokenInit - balanceUniqueTokenMiddle - TRANSFER_AMOUNT; + console.log('[Unique -> Polkadex] transaction fees on Unique: %s UNQ', helper.util.bigIntToDecimals(unqFees)); + expect(unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash); + }); + }); + + + itSub('Should connect to Polkadex and send UNQ back', async ({helper}) => { + + const uniqueMultilocation = { + V2: { + parents: 1, + interior: { + X1: {Parachain: UNIQUE_CHAIN}, + }, + }, + }; + + const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + randomAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + TRANSFER_AMOUNT, + ); + + let xcmProgramSent: any; + + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.getSudo().xcm.send(alice, uniqueMultilocation, xcmProgram); + + xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + }); + + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == xcmProgramSent.messageHash); + + balanceUniqueTokenFinal = await helper.balance.getSubstrate(randomAccount.address); + + expect(balanceUniqueTokenFinal).to.be.equal(balanceUniqueTokenInit - unqFees); + }); + + itSub('Polkadex can send only up to its balance', async ({helper}) => { + const polkadexBalance = 10000n * (10n ** UNQ_DECIMALS); + const polkadexSovereignAccount = helper.address.paraSiblingSovereignAccount(POLKADEX_CHAIN); + await helper.getSudo().balance.setBalanceSubstrate(alice, polkadexSovereignAccount, polkadexBalance); + const moreThanPolkadexHas = 2n * polkadexBalance; + + const targetAccount = helper.arrange.createEmptyAccount(); + + const uniqueMultilocation = { + V2: { + parents: 1, 
+ interior: { + X1: {Parachain: UNIQUE_CHAIN}, + }, + }, + }; + + const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + moreThanPolkadexHas, + ); + + let maliciousXcmProgramSent: any; + + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgram); + + maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + }); + + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == maliciousXcmProgramSent.messageHash); + + const targetAccountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(targetAccountBalance).to.be.equal(0n); + }); +}); + // These tests are relevant only when // the the corresponding foreign assets are not registered describeXCM('[XCM] Integration test: Unique rejects non-native tokens', () => { @@ -930,6 +1120,31 @@ describeXCM('[XCM] Integration test: Unique rejects non-native tokens', () => { await expectFailedToTransact(helper, messageSent); }); + + itSub('Unique rejects PDX tokens from Polkadex', async ({helper}) => { + + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + helper.arrange.createEmptyAccount().addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: POLKADEX_CHAIN, + }, + }, + }, + }, + testAmount, + ); + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.getSudo().xcm.send(alice, uniqueParachainMultilocation, maliciousXcmProgramFullId); + messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + }); + + await expectFailedToTransact(helper, messageSent); + }); }); describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { From 9d3aabb4c1a90f45cb08c932705c1a11f4b397b2 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 11 Sep 2023 20:41:47 +0700 Subject: [PATCH 002/143] test(polkadex): added reserve malicious test --- tests/src/xcm/xcmUnique.test.ts | 103 +++++++++++++++++++++++++++++--- vendor/baedeker-library | 1 + 2 files changed, 96 insertions(+), 8 deletions(-) create mode 160000 vendor/baedeker-library diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index b1050d8ef8..75eeedbfb0 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -56,7 +56,15 @@ const USDT_ASSET_METADATA_MINIMAL_BALANCE = 1n; const USDT_ASSET_AMOUNT = 10_000_000_000_000_000_000_000_000n; const SAFE_XCM_VERSION = 2; - +const maxWaitBlocks = 6; +const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isFailedToTransactAsset); +}; +const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isUntrustedReserveLocation); +}; describeXCM('[XCM] Integration test: Exchanging USDT with Statemint', () => { let alice: IKeyringPair; let bob: IKeyringPair; @@ -894,6 +902,16 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { expect(unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; 
await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + /* + Since only the parachain part of the Polkadex + infrastructure is launched (without their + solochain validators), processing incoming + assets will lead to an error. + This error indicates that the Polkadex chain + received a message from the Unique network, + since the hash is being checked to ensure + it matches what was sent. + */ await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash); }); }); @@ -914,8 +932,10 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { randomAccount.addressRaw, { Concrete: { - parents: 0, - interior: 'Here', + parents: 1, + interior: { + X1: {Parachain: UNIQUE_CHAIN}, + }, }, }, TRANSFER_AMOUNT, @@ -974,11 +994,81 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == maliciousXcmProgramSent.messageHash); + await expectFailedToTransact(helper, maliciousXcmProgramSent); const targetAccountBalance = await helper.balance.getSubstrate(targetAccount.address); expect(targetAccountBalance).to.be.equal(0n); }); + + itSub('Should not accept reserve transfer of UNQ from Acala', async ({helper}) => { + const testAmount = 10_000n * (10n ** UNQ_DECIMALS); + const targetAccount = helper.arrange.createEmptyAccount(); + + const uniqueMultilocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }; + + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }, + testAmount, + ); + + const maliciousXcmProgramHereId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + testAmount, + ); + + let maliciousXcmProgramFullIdSent: any; + let maliciousXcmProgramHereIdSent: any; + const maxWaitBlocks = 3; + + // Try to trick Unique using full UNQ identification + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgramFullId); + + maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + }); + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramFullIdSent); + + let accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + + // Try to trick Unique using shortened UNQ identification + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgramHereId); + + maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + }); + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramHereIdSent); + + accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + }); }); // These tests are relevant only when @@ -1058,10 +1148,7 @@ describeXCM('[XCM] Integration test: Unique rejects non-native tokens', () => { }); }); - const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: 
any) => { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash - && event.outcome.isFailedToTransactAsset); - }; + itSub('Unique rejects ACA tokens from Acala', async ({helper}) => { await usingAcalaPlaygrounds(acalaUrl, async (helper) => { diff --git a/vendor/baedeker-library b/vendor/baedeker-library new file mode 160000 index 0000000000..ae5678e26a --- /dev/null +++ b/vendor/baedeker-library @@ -0,0 +1 @@ +Subproject commit ae5678e26ab557a703a379444dd0a22bf640728a From 7dbcac3e71d51295ed37a281fa4fcbe636998065 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 11 Sep 2023 20:51:20 +0700 Subject: [PATCH 003/143] doc(polkadex CI): added comment --- tests/src/xcm/xcmUnique.test.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 75eeedbfb0..65df0e67c7 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -834,7 +834,11 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { const isWhitelisted = ((await helper.callRpc('api.query.xcmHelper.whitelistedTokens', [])).toJSON() as []) .map(nToBigInt).length != 0; - + /* + Check whether the Unique token has been added + to the whitelist, since an error will occur + if it is added again. + */ if(!isWhitelisted) { await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId); } From e6da5c9ea2b864b18c4b0e241607df7a5ad4ae45 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 12 Sep 2023 10:25:51 +0700 Subject: [PATCH 004/143] test(Polkadex CI): add comment, refactor --- tests/src/xcm/xcmUnique.test.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 65df0e67c7..1017a6ba5f 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -832,12 +832,14 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { }); await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { - const isWhitelisted = ((await helper.callRpc('api.query.xcmHelper.whitelistedTokens', [])).toJSON() as []) + const isWhitelisted = ((await helper.callRpc('api.query.xcmHelper.whitelistedTokens', [])) + .toJSON() as []) .map(nToBigInt).length != 0; /* Check whether the Unique token has been added to the whitelist, since an error will occur - if it is added again. + if it is added again. Needed for debugging + when this test is run multiple times. 
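+        (Here "the Unique token" means the uniqueAssetId multilocation
+        defined above, i.e. parents: 1, X1: Parachain(UNIQUE_CHAIN);
+        whitelisting is performed via a sudo xcmHelper.whitelistToken call.)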
*/ if(!isWhitelisted) { await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId); From d7ef52e5400f39f912078092e5051ef6be567949 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 12 Sep 2023 10:44:58 +0700 Subject: [PATCH 005/143] fix: remove baedeker --- vendor/baedeker-library | 1 - 1 file changed, 1 deletion(-) delete mode 160000 vendor/baedeker-library diff --git a/vendor/baedeker-library b/vendor/baedeker-library deleted file mode 160000 index ae5678e26a..0000000000 --- a/vendor/baedeker-library +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ae5678e26ab557a703a379444dd0a22bf640728a From 8b26223a58751a2463cf85e95360a549b98751f7 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 12 Sep 2023 15:49:02 +0700 Subject: [PATCH 006/143] test: fix mocha settings --- tests/.mocha.env | 20 -------------------- tests/.vscode/settings.json | 1 - 2 files changed, 21 deletions(-) delete mode 100644 tests/.mocha.env diff --git a/tests/.mocha.env b/tests/.mocha.env deleted file mode 100644 index f3956f59e7..0000000000 --- a/tests/.mocha.env +++ /dev/null @@ -1,20 +0,0 @@ -RELAY_ACALA_ID=1002 -RELAY_ASTAR_ID=1005 -RELAY_MOONBEAM_ID=1003 -RELAY_POLKADEX_ID=1006 -RELAY_STATEMINT_ID=1004 -RELAY_UNIQUE_ID=1001 -RELAY_HTTP_URL=http://127.0.0.1:9699/relay/ -RELAY_ACALA_HTTP_URL=http://127.0.0.1:9699/relay-acala/ -RELAY_ASTAR_HTTP_URL=http://127.0.0.1:9699/relay-astar/ -RELAY_MOONBEAM_HTTP_URL=http://127.0.0.1:9699/relay-moonbeam/ -RELAY_POLKADEX_HTTP_URL=http://127.0.0.1:9699/relay-polkadex/ -RELAY_STATEMINT_HTTP_URL=http://127.0.0.1:9699/relay-statemint/ -RELAY_UNIQUE_HTTP_URL=http://127.0.0.1:9699/relay-unique/ -RELAY_URL=ws://127.0.0.1:9699/relay/ -RELAY_ACALA_URL=ws://127.0.0.1:9699/relay-acala/ -RELAY_ASTAR_URL=ws://127.0.0.1:9699/relay-astar/ -RELAY_MOONBEAM_URL=ws://127.0.0.1:9699/relay-moonbeam/ -RELAY_POLKADEX_URL=ws://127.0.0.1:9699/relay-polkadex/ -RELAY_STATEMINT_URL=ws://127.0.0.1:9699/relay-statemint/ -RELAY_UNIQUE_URL=ws://127.0.0.1:9699/relay-unique/ diff --git a/tests/.vscode/settings.json b/tests/.vscode/settings.json index de3123d97b..f738082dbb 100644 --- a/tests/.vscode/settings.json +++ b/tests/.vscode/settings.json @@ -4,7 +4,6 @@ "RUN_XCM_TESTS": "1" }, "mochaExplorer.files": "src/**/*.test.ts", - "mochaExplorer.envPath": ".mocha.env", "mochaExplorer.require": "ts-node/register", "eslint.format.enable": true, "[javascript]": { From 74dbf886b6824a6b41907d6cedb810332ecaa4fb Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 10:59:25 +0200 Subject: [PATCH 007/143] Update tests/src/xcm/xcmUnique.test.ts --- tests/src/xcm/xcmUnique.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 1017a6ba5f..90e3baf299 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -1006,7 +1006,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { expect(targetAccountBalance).to.be.equal(0n); }); - itSub('Should not accept reserve transfer of UNQ from Acala', async ({helper}) => { + itSub('Should not accept reserve transfer of UNQ from Polkadex', async ({helper}) => { const testAmount = 10_000n * (10n ** UNQ_DECIMALS); const targetAccount = helper.arrange.createEmptyAccount(); From d1328b9e60c7fcad077cd9bde7febb727d6cad6f Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 11:58:09 +0200 Subject: [PATCH 008/143] fix: pallet-presence test --- tests/src/pallet-presence.test.ts | 9 +++++---- 1 file changed, 5 
insertions(+), 4 deletions(-) diff --git a/tests/src/pallet-presence.test.ts b/tests/src/pallet-presence.test.ts index 6f41df7c2a..cbe51ece0c 100644 --- a/tests/src/pallet-presence.test.ts +++ b/tests/src/pallet-presence.test.ts @@ -61,7 +61,8 @@ const consensusPallets = [ describe('Pallet presence', () => { before(async () => { await usingPlaygrounds(async helper => { - const chain = await helper.callRpc('api.rpc.system.chain', []); + const runtimeVersion = await helper.callRpc('api.rpc.state.getRuntimeVersion', []); + const chain = runtimeVersion.specName; const refungible = 'refungible'; const foreignAssets = 'foreignassets'; @@ -81,7 +82,7 @@ describe('Pallet presence', () => { ]; const testUtils = 'testutils'; - if(chain.eq('OPAL by UNIQUE')) { + if(chain.eq('opal')) { requiredPallets.push( refungible, foreignAssets, @@ -91,7 +92,7 @@ describe('Pallet presence', () => { ...preimage, ...governance, ); - } else if(chain.eq('QUARTZ by UNIQUE') || chain.eq('SAPPHIRE by UNIQUE')) { + } else if(chain.eq('quartz') || chain.eq('sapphire')) { requiredPallets.push( refungible, appPromotion, @@ -100,7 +101,7 @@ describe('Pallet presence', () => { ...preimage, ...governance, ); - } else if(chain.eq('UNIQUE')) { + } else if(chain.eq('unique')) { // Insert Unique additional pallets here requiredPallets.push( refungible, From f8b8c4f581f9bab456151abe4c48126636ed61e0 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 13:59:21 +0200 Subject: [PATCH 009/143] feat: add pallet-utility --- Cargo.toml | 1 + runtime/common/config/substrate.rs | 11 +++++++++-- runtime/common/construct_runtime.rs | 2 ++ runtime/opal/Cargo.toml | 1 + runtime/quartz/Cargo.toml | 1 + runtime/unique/Cargo.toml | 1 + 6 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a942d9877f..5aeeb8bc53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,7 @@ pallet-session = { default-features = false, git = "https://github.com/paritytec pallet-state-trie-migration = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } pallet-sudo = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } pallet-timestamp = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +pallet-utility = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } pallet-transaction-payment = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } pallet-transaction-payment-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } diff --git a/runtime/common/config/substrate.rs b/runtime/common/config/substrate.rs index 2dd6bb2c1c..c40a2e33e8 100644 --- a/runtime/common/config/substrate.rs +++ b/runtime/common/config/substrate.rs @@ -35,8 +35,8 @@ use frame_system::{ }; use pallet_transaction_payment::{Multiplier, ConstFeeMultiplier}; use crate::{ - runtime_common::DealWithFees, Runtime, RuntimeEvent, RuntimeCall, RuntimeOrigin, PalletInfo, - System, Balances, SS58Prefix, Version, + runtime_common::DealWithFees, Runtime, RuntimeEvent, RuntimeCall, RuntimeOrigin, OriginCaller, + PalletInfo, System, Balances, SS58Prefix, Version, }; use up_common::{types::*, constants::*}; use 
sp_std::vec; @@ -248,3 +248,10 @@ impl pallet_aura::Config for Runtime { type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = pallet_utility::weights::SubstrateWeight; +} diff --git a/runtime/common/construct_runtime.rs b/runtime/common/construct_runtime.rs index 7ceee157e4..f059ccabb1 100644 --- a/runtime/common/construct_runtime.rs +++ b/runtime/common/construct_runtime.rs @@ -136,6 +136,8 @@ macro_rules! construct_runtime { BalancesAdapter: pallet_balances_adapter = 155, + Utility: pallet_utility = 156, + #[cfg(feature = "pallet-test-utils")] TestUtils: pallet_test_utils = 255, } diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 7c559bc8d3..54279fbe47 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -263,6 +263,7 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } diff --git a/runtime/quartz/Cargo.toml b/runtime/quartz/Cargo.toml index cebd513948..f5365495bb 100644 --- a/runtime/quartz/Cargo.toml +++ b/runtime/quartz/Cargo.toml @@ -252,6 +252,7 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } diff --git a/runtime/unique/Cargo.toml b/runtime/unique/Cargo.toml index 97956f3c87..09d1c3a0a0 100644 --- a/runtime/unique/Cargo.toml +++ b/runtime/unique/Cargo.toml @@ -255,6 +255,7 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } From 749ee434a5986436036ea3dbd951f613d1c09065 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 15:29:49 +0200 Subject: [PATCH 010/143] fix: techcomm prime can be set by council --- runtime/common/config/governance/technical_committee.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/common/config/governance/technical_committee.rs b/runtime/common/config/governance/technical_committee.rs index 8efa2ab214..33957316e6 100644 --- a/runtime/common/config/governance/technical_committee.rs +++ b/runtime/common/config/governance/technical_committee.rs @@ -38,7 +38,7 @@ impl pallet_membership::Config for Runtime { type RemoveOrigin = RootOrMoreThanHalfCouncil; type SwapOrigin = RootOrMoreThanHalfCouncil; type ResetOrigin = EnsureRoot; - type PrimeOrigin = EnsureRoot; + type PrimeOrigin = RootOrMoreThanHalfCouncil; type MembershipInitialized = TechnicalCommittee; type MembershipChanged = TechnicalCommittee; type MaxMembers = TechnicalMaxMembers; From 58444d8f446733dc64955a6ac92d758d1f7a9a50 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 15:30:09 +0200 Subject: [PATCH 
011/143] feat: add pallet-utility playground helper --- tests/src/util/playgrounds/unique.ts | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index d892338807..e2e4456e75 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -3381,6 +3381,10 @@ class DemocracyGroup extends HelperGroup { return this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Inline: proposalCall.method.toHex()}]); } + externalProposeDefaultWithPreimageCall(preimage: string) { + return this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Legacy: preimage}]); + } + // ... and blacklist external proposal hash. vetoExternal(signer: TSigner, proposalHash: string) { return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vetoExternal', [proposalHash]); @@ -3733,6 +3737,20 @@ class AssetsGroup extends HelperGroup { } } +class UtilityGroup extends HelperGroup { + async batch(signer: TSigner, txs: any[]) { + return await this.helper.executeExtrinsic(signer, 'api.tx.utility.batch', [txs]); + } + + async batchAll(signer: TSigner, txs: any[]) { + return await this.helper.executeExtrinsic(signer, 'api.tx.utility.batchAll', [txs]); + } + + batchAllCall(txs: any[]) { + return this.helper.constructApiCall('api.tx.utility.batchAll', [txs]); + } +} + class AcalaAssetRegistryGroup extends HelperGroup { async registerForeignAsset(signer: TSigner, destination: any, metadata: AcalaAssetMetadata) { await this.helper.executeExtrinsic(signer, 'api.tx.assetRegistry.registerForeignAsset', [destination, metadata], true); @@ -3835,6 +3853,7 @@ export class UniqueHelper extends ChainHelperBase { xcm: XcmGroup; xTokens: XTokensGroup; tokens: TokensGroup; + utility: UtilityGroup; constructor(logger?: ILogger, options: { [key: string]: any } = {}) { super(logger, options.helperBase ?? 
UniqueHelper); @@ -3865,6 +3884,7 @@ export class UniqueHelper extends ChainHelperBase { this.xcm = new XcmGroup(this, 'polkadotXcm'); this.xTokens = new XTokensGroup(this); this.tokens = new TokensGroup(this); + this.utility = new UtilityGroup(this); } getSudo() { From a6d86512c19924c15b41a94ad107c2d9b4d067c1 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 15:30:38 +0200 Subject: [PATCH 012/143] test: governance init --- tests/src/governance/init.test.ts | 125 ++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 tests/src/governance/init.test.ts diff --git a/tests/src/governance/init.test.ts b/tests/src/governance/init.test.ts new file mode 100644 index 0000000000..85e4fe67da --- /dev/null +++ b/tests/src/governance/init.test.ts @@ -0,0 +1,125 @@ +import {IKeyringPair} from '@polkadot/types/types'; +import {usingPlaygrounds, itSub, expect, Pallets, requirePalletsOrSkip, describeGov} from '../util'; +import {Event} from '../util/playgrounds/unique.dev'; +import {ICounselors, democracyLaunchPeriod, democracyVotingPeriod, ITechComms, democracyEnactmentPeriod, clearCouncil, clearTechComm} from './util'; + +describeGov('Governance: Initialization', () => { + let donor: IKeyringPair; + let sudoer: IKeyringPair; + let counselors: ICounselors; + let techcomms: ITechComms; + + before(async function() { + await usingPlaygrounds(async (helper, privateKey) => { + requirePalletsOrSkip(this, helper, [Pallets.Democracy, Pallets.Council, Pallets.TechnicalCommittee]); + + const councilMembers = await helper.council.membership.getMembers(); + const techcommMembers = await helper.technicalCommittee.membership.getMembers(); + expect(councilMembers.length == 0, 'The Council must be empty before the Gov Init'); + expect(techcommMembers.length == 0, 'The Technical Commettee must be empty before the Gov Init'); + + donor = await privateKey({url: import.meta.url}); + sudoer = await privateKey('//Alice'); + + const [alex, ildar, charu, filip, irina] = await helper.arrange.createAccounts([10_000n, 10_000n, 10_000n, 10_000n, 10_000n], donor); + counselors = { + alex, + ildar, + charu, + filip, + irina, + }; + + const [greg, andy, constantine] = await helper.arrange.createAccounts([10_000n, 10_000n, 10_000n], donor); + techcomms = { + greg, + andy, + constantine, + }; + }); + }); + + itSub('Initialize Governance', async ({helper}) => { + console.log('\t- Setup the Prime of the Council via sudo'); + await helper.getSudo().utility.batchAll(sudoer, [ + helper.council.membership.addMemberCall(counselors.alex.address), + helper.council.membership.setPrimeCall(counselors.alex.address), + ]); + + let councilMembers = await helper.council.membership.getMembers(); + const councilPrime = await helper.council.collective.getPrimeMember(); + expect(councilMembers).to.be.deep.equal([counselors.alex.address]); + expect(councilPrime).to.be.equal(counselors.alex.address); + + console.log('\t- The Council Prime initializes the Technical Commettee'); + const councilProposalThreshold = 1; + + await helper.council.collective.propose( + counselors.alex, + helper.utility.batchAllCall([ + helper.technicalCommittee.membership.addMemberCall(techcomms.greg.address), + helper.technicalCommittee.membership.addMemberCall(techcomms.andy.address), + helper.technicalCommittee.membership.addMemberCall(techcomms.constantine.address), + + helper.technicalCommittee.membership.setPrimeCall(techcomms.greg.address), + ]), + councilProposalThreshold, + ); + + const techCommMembers = await 
helper.technicalCommittee.membership.getMembers(); + const techCommPrime = await helper.technicalCommittee.membership.getPrimeMember(); + const expectedTechComms = [techcomms.greg.address, techcomms.andy.address, techcomms.constantine.address]; + expect(techCommMembers.length).to.be.equal(expectedTechComms.length); + expect(techCommMembers).to.containSubset(expectedTechComms); + expect(techCommPrime).to.be.equal(techcomms.greg.address); + + console.log('\t- The Council Prime initiates a referendum to add counselors'); + const returnPreimageHash = true; + const preimageHash = await helper.preimage.notePreimageFromCall(counselors.alex, helper.utility.batchAllCall([ + helper.council.membership.addMemberCall(counselors.ildar.address), + helper.council.membership.addMemberCall(counselors.charu.address), + helper.council.membership.addMemberCall(counselors.filip.address), + helper.council.membership.addMemberCall(counselors.irina.address), + ]), returnPreimageHash); + + await helper.council.collective.propose( + counselors.alex, + helper.democracy.externalProposeDefaultWithPreimageCall(preimageHash), + councilProposalThreshold, + ); + + console.log('\t- The referendum is being decided'); + const startedEvent = await helper.wait.expectEvent(democracyLaunchPeriod, Event.Democracy.Started); + + await helper.democracy.vote(counselors.filip, startedEvent.referendumIndex, { + Standard: { + vote: { + aye: true, + conviction: 1, + }, + balance: 10_000n, + }, + }); + + const passedReferendumEvent = await helper.wait.expectEvent(democracyVotingPeriod, Event.Democracy.Passed); + expect(passedReferendumEvent.referendumIndex).to.be.equal(startedEvent.referendumIndex); + + await helper.wait.expectEvent(democracyEnactmentPeriod, Event.Scheduler.Dispatched); + + councilMembers = await helper.council.membership.getMembers(); + const expectedCounselors = [ + counselors.alex.address, + counselors.ildar.address, + counselors.charu.address, + counselors.filip.address, + counselors.irina.address, + ]; + expect(councilMembers.length).to.be.equal(expectedCounselors.length); + expect(councilMembers).to.containSubset(expectedCounselors); + }); + + after(async function() { + await clearTechComm(sudoer); + await clearCouncil(sudoer); + }); +}); From 8a54415cf31bd324497565b1bbc46058fab45af6 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 12 Sep 2023 16:18:55 +0200 Subject: [PATCH 013/143] test: governance init fellowship --- tests/src/governance/init.test.ts | 75 ++++++++++++++++++++++++++-- tests/src/util/playgrounds/unique.ts | 8 ++- 2 files changed, 78 insertions(+), 5 deletions(-) diff --git a/tests/src/governance/init.test.ts b/tests/src/governance/init.test.ts index 85e4fe67da..83953ca575 100644 --- a/tests/src/governance/init.test.ts +++ b/tests/src/governance/init.test.ts @@ -1,13 +1,17 @@ import {IKeyringPair} from '@polkadot/types/types'; import {usingPlaygrounds, itSub, expect, Pallets, requirePalletsOrSkip, describeGov} from '../util'; import {Event} from '../util/playgrounds/unique.dev'; -import {ICounselors, democracyLaunchPeriod, democracyVotingPeriod, ITechComms, democracyEnactmentPeriod, clearCouncil, clearTechComm} from './util'; +import {ICounselors, democracyLaunchPeriod, democracyVotingPeriod, ITechComms, democracyEnactmentPeriod, clearCouncil, clearTechComm, clearFellowship} from './util'; describeGov('Governance: Initialization', () => { let donor: IKeyringPair; let sudoer: IKeyringPair; let counselors: ICounselors; let techcomms: ITechComms; + let coreDevs: any; + + const 
expectedAlexFellowRank = 7; + const expectedFellowRank = 6; before(async function() { await usingPlaygrounds(async (helper, privateKey) => { @@ -21,7 +25,24 @@ describeGov('Governance: Initialization', () => { donor = await privateKey({url: import.meta.url}); sudoer = await privateKey('//Alice'); - const [alex, ildar, charu, filip, irina] = await helper.arrange.createAccounts([10_000n, 10_000n, 10_000n, 10_000n, 10_000n], donor); + const counselorsNum = 5; + const techCommsNum = 3; + const coreDevsNum = 2; + const [ + alex, + ildar, + charu, + filip, + irina, + + greg, + andy, + constantine, + + yaroslav, + daniel, + ] = await helper.arrange.createAccounts(new Array(counselorsNum + techCommsNum + coreDevsNum).fill(10_000n), donor); + counselors = { alex, ildar, @@ -30,26 +51,43 @@ describeGov('Governance: Initialization', () => { irina, }; - const [greg, andy, constantine] = await helper.arrange.createAccounts([10_000n, 10_000n, 10_000n], donor); techcomms = { greg, andy, constantine, }; + + coreDevs = { + yaroslav: yaroslav, + daniel: daniel, + }; }); }); itSub('Initialize Governance', async ({helper}) => { + const promoteFellow = (fellow: string, promotionsNum: number) => { + return new Array(promotionsNum).fill(helper.fellowship.collective.promoteCall(fellow)); + }; + + const expectFellowRank = async (fellow: string, expectedRank: number) => { + expect(await helper.fellowship.collective.getMemberRank(fellow)).to.be.equal(expectedRank); + }; + console.log('\t- Setup the Prime of the Council via sudo'); await helper.getSudo().utility.batchAll(sudoer, [ helper.council.membership.addMemberCall(counselors.alex.address), helper.council.membership.setPrimeCall(counselors.alex.address), + + helper.fellowship.collective.addMemberCall(counselors.alex.address), + ...promoteFellow(counselors.alex.address, expectedAlexFellowRank), ]); let councilMembers = await helper.council.membership.getMembers(); const councilPrime = await helper.council.collective.getPrimeMember(); + const alexFellowRank = await helper.fellowship.collective.getMemberRank(counselors.alex.address); expect(councilMembers).to.be.deep.equal([counselors.alex.address]); expect(councilPrime).to.be.equal(counselors.alex.address); + expect(alexFellowRank).to.be.equal(expectedAlexFellowRank); console.log('\t- The Council Prime initializes the Technical Commettee'); const councilProposalThreshold = 1; @@ -80,6 +118,26 @@ describeGov('Governance: Initialization', () => { helper.council.membership.addMemberCall(counselors.charu.address), helper.council.membership.addMemberCall(counselors.filip.address), helper.council.membership.addMemberCall(counselors.irina.address), + + helper.fellowship.collective.addMemberCall(counselors.charu.address), + helper.fellowship.collective.addMemberCall(counselors.ildar.address), + helper.fellowship.collective.addMemberCall(counselors.irina.address), + helper.fellowship.collective.addMemberCall(counselors.filip.address), + helper.fellowship.collective.addMemberCall(techcomms.greg.address), + helper.fellowship.collective.addMemberCall(techcomms.andy.address), + helper.fellowship.collective.addMemberCall(techcomms.constantine.address), + helper.fellowship.collective.addMemberCall(coreDevs.yaroslav.address), + helper.fellowship.collective.addMemberCall(coreDevs.daniel.address), + + ...promoteFellow(counselors.charu.address, expectedFellowRank), + ...promoteFellow(counselors.ildar.address, expectedFellowRank), + ...promoteFellow(counselors.irina.address, expectedFellowRank), + 
...promoteFellow(counselors.filip.address, expectedFellowRank), + ...promoteFellow(techcomms.greg.address, expectedFellowRank), + ...promoteFellow(techcomms.andy.address, expectedFellowRank), + ...promoteFellow(techcomms.constantine.address, expectedFellowRank), + ...promoteFellow(coreDevs.yaroslav.address, expectedFellowRank), + ...promoteFellow(coreDevs.daniel.address, expectedFellowRank), ]), returnPreimageHash); await helper.council.collective.propose( @@ -116,9 +174,20 @@ describeGov('Governance: Initialization', () => { ]; expect(councilMembers.length).to.be.equal(expectedCounselors.length); expect(councilMembers).to.containSubset(expectedCounselors); + + await expectFellowRank(counselors.ildar.address, expectedFellowRank); + await expectFellowRank(counselors.charu.address, expectedFellowRank); + await expectFellowRank(counselors.filip.address, expectedFellowRank); + await expectFellowRank(counselors.irina.address, expectedFellowRank); + await expectFellowRank(techcomms.greg.address, expectedFellowRank); + await expectFellowRank(techcomms.andy.address, expectedFellowRank); + await expectFellowRank(techcomms.constantine.address, expectedFellowRank); + await expectFellowRank(coreDevs.yaroslav.address, expectedFellowRank); + await expectFellowRank(coreDevs.daniel.address, expectedFellowRank); }); after(async function() { + await clearFellowship(sudoer); await clearTechComm(sudoer); await clearCouncil(sudoer); }); diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index e2e4456e75..5f5641a577 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -3255,8 +3255,8 @@ class RankedCollectiveGroup extends HelperGroup { return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.promoteMember`, [member]); } - promoteCall(newMember: string) { - return this.helper.constructApiCall(`api.tx.${this.collective}.promoteMember`, [newMember]); + promoteCall(member: string) { + return this.helper.constructApiCall(`api.tx.${this.collective}.promoteMember`, [member]); } demote(signer: TSigner, member: string) { @@ -3275,6 +3275,10 @@ class RankedCollectiveGroup extends HelperGroup { return (await this.helper.getApi().query.fellowshipCollective.members.keys()) .map((key) => key.args[0].toString()); } + + async getMemberRank(member: string) { + return (await this.helper.callRpc('api.query.fellowshipCollective.members', [member])).toJSON().rank; + } } class ReferendaGroup extends HelperGroup { From db9303e6d830919e09428fbbc339cf64ec16abe5 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 13 Sep 2023 05:12:38 +0000 Subject: [PATCH 014/143] tests: utility in pallet-presence --- tests/src/pallet-presence.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/src/pallet-presence.test.ts b/tests/src/pallet-presence.test.ts index cbe51ece0c..31201bfaab 100644 --- a/tests/src/pallet-presence.test.ts +++ b/tests/src/pallet-presence.test.ts @@ -27,6 +27,7 @@ const requiredPallets = [ 'statetriemigration', 'structure', 'system', + 'utility', 'vesting', 'parachainsystem', 'parachaininfo', From 465dd2abfab4a420d7c417001ce4874ab57de9e3 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 13 Sep 2023 10:12:33 +0200 Subject: [PATCH 015/143] fix: yarn fix --- tests/src/governance/init.test.ts | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/src/governance/init.test.ts b/tests/src/governance/init.test.ts index 83953ca575..7d2a944745 100644 --- a/tests/src/governance/init.test.ts +++ 
b/tests/src/governance/init.test.ts @@ -41,7 +41,7 @@ describeGov('Governance: Initialization', () => { yaroslav, daniel, - ] = await helper.arrange.createAccounts(new Array(counselorsNum + techCommsNum + coreDevsNum).fill(10_000n), donor); + ] = await helper.arrange.createAccounts(new Array(counselorsNum + techCommsNum + coreDevsNum).fill(10_000n), donor); counselors = { alex, @@ -65,12 +65,10 @@ describeGov('Governance: Initialization', () => { }); itSub('Initialize Governance', async ({helper}) => { - const promoteFellow = (fellow: string, promotionsNum: number) => { - return new Array(promotionsNum).fill(helper.fellowship.collective.promoteCall(fellow)); - }; + const promoteFellow = (fellow: string, promotionsNum: number) => new Array(promotionsNum).fill(helper.fellowship.collective.promoteCall(fellow)); const expectFellowRank = async (fellow: string, expectedRank: number) => { - expect(await helper.fellowship.collective.getMemberRank(fellow)).to.be.equal(expectedRank); + expect(await helper.fellowship.collective.getMemberRank(fellow)).to.be.equal(expectedRank); }; console.log('\t- Setup the Prime of the Council via sudo'); From e0258a2f7a218871d8ceb0f94d9147322a6bd3a7 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 13 Sep 2023 10:21:36 +0200 Subject: [PATCH 016/143] fix: pallet-utility pass features --- runtime/opal/Cargo.toml | 3 +++ runtime/quartz/Cargo.toml | 3 +++ runtime/unique/Cargo.toml | 3 +++ 3 files changed, 9 insertions(+) diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 54279fbe47..03ec7e5700 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -58,6 +58,7 @@ runtime-benchmarks = [ 'pallet-refungible/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-unique-scheduler-v2/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', @@ -120,6 +121,7 @@ std = [ 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', + 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', @@ -213,6 +215,7 @@ try-runtime = [ 'pallet-sudo/try-runtime', 'pallet-test-utils?/try-runtime', 'pallet-timestamp/try-runtime', + 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', 'pallet-unique-scheduler-v2/try-runtime', diff --git a/runtime/quartz/Cargo.toml b/runtime/quartz/Cargo.toml index f5365495bb..8a5ad8012d 100644 --- a/runtime/quartz/Cargo.toml +++ b/runtime/quartz/Cargo.toml @@ -55,6 +55,7 @@ runtime-benchmarks = [ 'pallet-scheduler/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'sp-runtime/runtime-benchmarks', @@ -120,6 +121,7 @@ std = [ 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', + 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', @@ -203,6 +205,7 @@ try-runtime = [ 'pallet-structure/try-runtime', 'pallet-sudo/try-runtime', 'pallet-timestamp/try-runtime', + 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', 'pallet-unique/try-runtime', diff --git a/runtime/unique/Cargo.toml b/runtime/unique/Cargo.toml index 09d1c3a0a0..f39d9d3fde 100644 --- a/runtime/unique/Cargo.toml +++ 
b/runtime/unique/Cargo.toml @@ -52,6 +52,7 @@ runtime-benchmarks = [ 'pallet-scheduler/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'sp-runtime/runtime-benchmarks', @@ -118,6 +119,7 @@ std = [ 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', + 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', @@ -205,6 +207,7 @@ try-runtime = [ 'pallet-structure/try-runtime', 'pallet-sudo/try-runtime', 'pallet-timestamp/try-runtime', + 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', 'pallet-unique/try-runtime', From e089f01177a82c22b68a9a98629283f27366cf1f Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Wed, 13 Sep 2023 15:09:06 +0200 Subject: [PATCH 017/143] build: bump version to v943061 --- runtime/opal/src/lib.rs | 2 +- runtime/quartz/src/lib.rs | 4 ++-- runtime/unique/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/opal/src/lib.rs b/runtime/opal/src/lib.rs index 76acf8f656..46ff8248a7 100644 --- a/runtime/opal/src/lib.rs +++ b/runtime/opal/src/lib.rs @@ -52,7 +52,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("opal"), impl_name: create_runtime_str!("opal"), authoring_version: 1, - spec_version: 943060, + spec_version: 943061, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/runtime/quartz/src/lib.rs b/runtime/quartz/src/lib.rs index 98406b14af..37fc4d72be 100644 --- a/runtime/quartz/src/lib.rs +++ b/runtime/quartz/src/lib.rs @@ -54,7 +54,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_name: create_runtime_str!("quartz"), authoring_version: 1, - spec_version: 943060, + spec_version: 943061, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, @@ -67,7 +67,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_name: create_runtime_str!("sapphire"), authoring_version: 1, - spec_version: 943060, + spec_version: 943061, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/runtime/unique/src/lib.rs b/runtime/unique/src/lib.rs index 3e0c8b49ea..6ca1f190ce 100644 --- a/runtime/unique/src/lib.rs +++ b/runtime/unique/src/lib.rs @@ -52,7 +52,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("unique"), impl_name: create_runtime_str!("unique"), authoring_version: 1, - spec_version: 943060, + spec_version: 943061, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, From cd116b846ddb94244a2ba1baa3e828a23b97a277 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Wed, 13 Sep 2023 17:14:15 +0200 Subject: [PATCH 018/143] build: update Cargo.lock --- Cargo.lock | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 7e33f307ab..c5dc6c5183 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5994,6 +5994,7 @@ dependencies = [ "pallet-treasury", "pallet-unique", "pallet-unique-scheduler-v2", + "pallet-utility", "pallet-xcm", "parachain-info", "parity-scale-codec", @@ -9549,6 +9550,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-treasury", "pallet-unique", + "pallet-utility", "pallet-xcm", "parachain-info", "parity-scale-codec", @@ -14050,6 +14052,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", 
"pallet-treasury", "pallet-unique", + "pallet-utility", "pallet-xcm", "parachain-info", "parity-scale-codec", From 383d45a29a755199568775a1b5c5048c7798a612 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Thu, 14 Sep 2023 10:19:23 +0000 Subject: [PATCH 019/143] refactor(playgorunds): code structure regroup --- .envrc | 2 +- tests/src/util/playgrounds/types.ts | 30 -- tests/src/util/playgrounds/types.xcm.ts | 29 ++ tests/src/util/playgrounds/unique.dev.ts | 101 ++++++- tests/src/util/playgrounds/unique.ts | 348 +---------------------- tests/src/util/playgrounds/unique.xcm.ts | 208 ++++++++++++++ 6 files changed, 345 insertions(+), 373 deletions(-) create mode 100644 tests/src/util/playgrounds/types.xcm.ts create mode 100644 tests/src/util/playgrounds/unique.xcm.ts diff --git a/.envrc b/.envrc index 3114ea17ee..95f7214f35 100644 --- a/.envrc +++ b/.envrc @@ -30,7 +30,7 @@ if test -f .baedeker/.bdk-env/discover.env; then fi echo -e "${GREEN}Baedeker env updated${RESET}" - nginx_id=$(docker compose -f .baedeker/.bdk-env/docker-compose.yml ps --format=json | jq -r '.[] | select(.Service == "nginx") | .ID' -e) + nginx_id=$(docker compose -f .baedeker/.bdk-env/docker-compose.yml ps --format=json | jq -s 'flatten' | jq -r '.[] | select(.Service == "nginx") | .ID' -e) if ! [ $? -eq 0 ]; then echo -e "${RED}Nginx container not found${RESET}" exit 0 diff --git a/tests/src/util/playgrounds/types.ts b/tests/src/util/playgrounds/types.ts index 004fa96acc..26ac76cd92 100644 --- a/tests/src/util/playgrounds/types.ts +++ b/tests/src/util/playgrounds/types.ts @@ -223,36 +223,6 @@ export interface IForeignAssetMetadata { minimalBalance?: bigint, } -export interface MoonbeamAssetInfo { - location: any, - metadata: { - name: string, - symbol: string, - decimals: number, - isFrozen: boolean, - minimalBalance: bigint, - }, - existentialDeposit: bigint, - isSufficient: boolean, - unitsPerSecond: bigint, - numAssetsWeightHint: number, -} - -export interface AcalaAssetMetadata { - name: string, - symbol: string, - decimals: number, - minimalBalance: bigint, -} - -export interface DemocracyStandardAccountVote { - balance: bigint, - vote: { - aye: boolean, - conviction: number, - }, -} - export interface DemocracySplitAccount { aye: bigint, nay: bigint, diff --git a/tests/src/util/playgrounds/types.xcm.ts b/tests/src/util/playgrounds/types.xcm.ts new file mode 100644 index 0000000000..5ac5692fc8 --- /dev/null +++ b/tests/src/util/playgrounds/types.xcm.ts @@ -0,0 +1,29 @@ +export interface AcalaAssetMetadata { + name: string, + symbol: string, + decimals: number, + minimalBalance: bigint, +} + +export interface MoonbeamAssetInfo { + location: any, + metadata: { + name: string, + symbol: string, + decimals: number, + isFrozen: boolean, + minimalBalance: bigint, + }, + existentialDeposit: bigint, + isSufficient: boolean, + unitsPerSecond: bigint, + numAssetsWeightHint: number, +} + +export interface DemocracyStandardAccountVote { + balance: bigint, + vote: { + aye: boolean, + conviction: number, + }, +} \ No newline at end of file diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 7532a3defe..63bc6f5a3a 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -3,16 +3,17 @@ import {stringToU8a} from '@polkadot/util'; import {blake2AsHex, encodeAddress, mnemonicGenerate} from '@polkadot/util-crypto'; -import {UniqueHelper, MoonbeamHelper, ChainHelperBase, AcalaHelper, RelayHelper, WestmintHelper, AstarHelper, PolkadexHelper} 
from './unique'; +import {UniqueHelper, ChainHelperBase, ChainHelperBaseConstructor} from './unique'; import {ApiPromise, Keyring, WsProvider} from '@polkadot/api'; import * as defs from '../../interfaces/definitions'; import {IKeyringPair} from '@polkadot/types/types'; import {EventRecord} from '@polkadot/types/interfaces'; import {ICrossAccountId, IPovInfo, ITransactionResult, TSigner} from './types'; import {FrameSystemEventRecord, XcmV2TraitsError} from '@polkadot/types/lookup'; -import {VoidFn} from '@polkadot/api/types'; +import {SignerOptions, VoidFn} from '@polkadot/api/types'; import {Pallets} from '..'; import {spawnSync} from 'child_process'; +import {AcalaHelper, AstarHelper, MoonbeamHelper, PolkadexHelper, RelayHelper, WestmintHelper} from './unique.xcm'; export class SilentLogger { log(_msg: any, _level: any): void { } @@ -260,6 +261,80 @@ export class Event { }; } +// eslint-disable-next-line @typescript-eslint/naming-convention +export function SudoHelper(Base: T) { + return class extends Base { + constructor(...args: any[]) { + super(...args); + } + + async executeExtrinsic( + sender: IKeyringPair, + extrinsic: string, + params: any[], + expectSuccess?: boolean, + options: Partial | null = null, + ): Promise { + const call = this.constructApiCall(extrinsic, params); + const result = await super.executeExtrinsic( + sender, + 'api.tx.sudo.sudo', + [call], + expectSuccess, + options, + ); + + if(result.status === 'Fail') return result; + + const data = (result.result.events.find(x => x.event.section == 'sudo' && x.event.method == 'Sudid')?.event.data as any).sudoResult; + if(data.isErr) { + if(data.asErr.isModule) { + const error = (result.result.events[1].event.data as any).sudoResult.asErr.asModule; + const metaError = super.getApi()?.registry.findMetaError(error); + throw new Error(`${metaError.section}.${metaError.name}`); + } else if(data.asErr.isToken) { + throw new Error(`Token: ${data.asErr.asToken}`); + } + // May be [object Object] in case of unhandled non-unit enum + throw new Error(`Misc: ${data.asErr.toHuman()}`); + } + return result; + } + async executeExtrinsicUncheckedWeight( + sender: IKeyringPair, + extrinsic: string, + params: any[], + expectSuccess?: boolean, + options: Partial | null = null, + ): Promise { + const call = this.constructApiCall(extrinsic, params); + const result = await super.executeExtrinsic( + sender, + 'api.tx.sudo.sudoUncheckedWeight', + [call, {refTime: 0, proofSize: 0}], + expectSuccess, + options, + ); + + if(result.status === 'Fail') return result; + + const data = (result.result.events.find(x => x.event.section == 'sudo' && x.event.method == 'Sudid')?.event.data as any).sudoResult; + if(data.isErr) { + if(data.asErr.isModule) { + const error = (result.result.events[1].event.data as any).sudoResult.asErr.asModule; + const metaError = super.getApi()?.registry.findMetaError(error); + throw new Error(`${metaError.section}.${metaError.name}`); + } else if(data.asErr.isToken) { + throw new Error(`Token: ${data.asErr.asToken}`); + } + // May be [object Object] in case of unhandled non-unit enum + throw new Error(`Misc: ${data.asErr.toHuman()}`); + } + return result; + } + }; +} + export class DevUniqueHelper extends UniqueHelper { /** * Arrange methods for tests @@ -326,6 +401,11 @@ export class DevUniqueHelper extends UniqueHelper { this.network = await UniqueHelper.detectNetwork(this.api); this.wsEndpoint = wsEndpoint; } + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = 
SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as T; + } } export class DevRelayHelper extends RelayHelper { @@ -386,6 +466,12 @@ export class DevAstarHelper extends AstarHelper { super(logger, options); this.wait = new WaitGroup(this); } + + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as T; + } } export class DevShidenHelper extends AstarHelper { @@ -408,6 +494,11 @@ export class DevAcalaHelper extends AcalaHelper { super(logger, options); this.wait = new WaitGroup(this); } + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as T; + } } export class DevPolkadexHelper extends PolkadexHelper { @@ -418,6 +509,12 @@ export class DevPolkadexHelper extends PolkadexHelper { super(logger, options); this.wait = new WaitGroup(this); } + + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as T; + } } export class DevKaruraHelper extends DevAcalaHelper {} diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index 5f5641a577..19bb7cf5fd 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -40,9 +40,6 @@ import { TSubstrateAccount, TNetworks, IForeignAssetMetadata, - AcalaAssetMetadata, - MoonbeamAssetInfo, - DemocracyStandardAccountVote, IEthCrossAccountId, IPhasicEvent, } from './types'; @@ -818,7 +815,7 @@ export class ChainHelperBase { } -class HelperGroup { +export class HelperGroup { helper: T; constructor(uniqueHelper: T) { @@ -2373,7 +2370,7 @@ class ChainGroup extends HelperGroup { } } -class SubstrateBalanceGroup extends HelperGroup { +export class SubstrateBalanceGroup extends HelperGroup { /** * Get substrate address balance * @param address substrate address @@ -2440,7 +2437,7 @@ class SubstrateBalanceGroup extends HelperGroup { } } -class EthereumBalanceGroup extends HelperGroup { +export class EthereumBalanceGroup extends HelperGroup { /** * Get ethereum address balance * @param address ethereum address @@ -3595,7 +3592,7 @@ class ForeignAssetsGroup extends HelperGroup { } } -class XcmGroup extends HelperGroup { +export class XcmGroup extends HelperGroup { palletName: string; constructor(helper: T, palletName: string) { @@ -3688,7 +3685,7 @@ class XcmGroup extends HelperGroup { } } -class XTokensGroup extends HelperGroup { +export class XTokensGroup extends HelperGroup { async transfer(signer: TSigner, currencyId: any, amount: bigint, destination: any, destWeight: any) { await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transfer', [currencyId, amount, destination, destWeight], true); } @@ -3702,20 +3699,16 @@ class XTokensGroup extends HelperGroup { } } -class PolkadexXcmHelperGroup extends HelperGroup { - async whitelistToken(signer: TSigner, assetId: any) { - await this.helper.executeExtrinsic(signer, 'api.tx.xcmHelper.whitelistToken', [assetId], true); - } -} -class TokensGroup extends HelperGroup { + +export class TokensGroup extends HelperGroup { async accounts(address: string, currencyId: any) { const {free} = (await this.helper.callRpc('api.query.tokens.accounts', [address, currencyId])).toJSON() as any; return BigInt(free); } } -class AssetsGroup extends HelperGroup { +export class AssetsGroup extends HelperGroup { 
async create(signer: TSigner, assetId: number, admin: string, minimalBalance: bigint) { await this.helper.executeExtrinsic(signer, 'api.tx.assets.create', [assetId, admin, minimalBalance], true); } @@ -3755,86 +3748,7 @@ class UtilityGroup extends HelperGroup { } } -class AcalaAssetRegistryGroup extends HelperGroup { - async registerForeignAsset(signer: TSigner, destination: any, metadata: AcalaAssetMetadata) { - await this.helper.executeExtrinsic(signer, 'api.tx.assetRegistry.registerForeignAsset', [destination, metadata], true); - } -} - -class MoonbeamAssetManagerGroup extends HelperGroup { - makeRegisterForeignAssetProposal(assetInfo: MoonbeamAssetInfo) { - const apiPrefix = 'api.tx.assetManager.'; - - const registerTx = this.helper.constructApiCall( - apiPrefix + 'registerForeignAsset', - [assetInfo.location, assetInfo.metadata, assetInfo.existentialDeposit, assetInfo.isSufficient], - ); - - const setUnitsTx = this.helper.constructApiCall( - apiPrefix + 'setAssetUnitsPerSecond', - [assetInfo.location, assetInfo.unitsPerSecond, assetInfo.numAssetsWeightHint], - ); - - const batchCall = this.helper.getApi().tx.utility.batchAll([registerTx, setUnitsTx]); - const encodedProposal = batchCall?.method.toHex() || ''; - return encodedProposal; - } - - async assetTypeId(location: any) { - return await this.helper.callRpc('api.query.assetManager.assetTypeId', [location]); - } -} - -class MoonbeamDemocracyGroup extends HelperGroup { - notePreimagePallet: string; - - constructor(helper: MoonbeamHelper, options: { [key: string]: any } = {}) { - super(helper); - this.notePreimagePallet = options.notePreimagePallet; - } - - async notePreimage(signer: TSigner, encodedProposal: string) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.notePreimagePallet}.notePreimage`, [encodedProposal], true); - } - - externalProposeMajority(proposal: any) { - return this.helper.constructApiCall('api.tx.democracy.externalProposeMajority', [proposal]); - } - - fastTrack(proposalHash: string, votingPeriod: number, delayPeriod: number) { - return this.helper.constructApiCall('api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); - } - - async referendumVote(signer: TSigner, referendumIndex: number, accountVote: DemocracyStandardAccountVote) { - await this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, {Standard: accountVote}], true); - } -} - -class MoonbeamCollectiveGroup extends HelperGroup { - collective: string; - - constructor(helper: MoonbeamHelper, collective: string) { - super(helper); - this.collective = collective; - } - - async propose(signer: TSigner, threshold: number, proposalHash: string, lengthBound: number) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.propose`, [threshold, proposalHash, lengthBound], true); - } - - async vote(signer: TSigner, proposalHash: string, proposalIndex: number, approve: boolean) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [proposalHash, proposalIndex, approve], true); - } - - async close(signer: TSigner, proposalHash: string, proposalIndex: number, weightBound: any, lengthBound: number) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.close`, [proposalHash, proposalIndex, weightBound, lengthBound], true); - } - - async proposalCount() { - return Number(await this.helper.callRpc(`api.query.${this.collective}.proposalCount`, [])); - } -} export type ChainHelperBaseConstructor = new (...args: any[]) => ChainHelperBase; export type 
UniqueHelperConstructor = new (...args: any[]) => UniqueHelper; @@ -3890,145 +3804,6 @@ export class UniqueHelper extends ChainHelperBase { this.tokens = new TokensGroup(this); this.utility = new UtilityGroup(this); } - - getSudo() { - // eslint-disable-next-line @typescript-eslint/naming-convention - const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; - } -} - -export class XcmChainHelper extends ChainHelperBase { - async connect(wsEndpoint: string, _listeners?: any): Promise { - const wsProvider = new WsProvider(wsEndpoint); - this.api = new ApiPromise({ - provider: wsProvider, - }); - await this.api.isReadyOrError; - this.network = await UniqueHelper.detectNetwork(this.api); - } -} - -export class RelayHelper extends XcmChainHelper { - balance: SubstrateBalanceGroup; - xcm: XcmGroup; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? RelayHelper); - - this.balance = new SubstrateBalanceGroup(this); - this.xcm = new XcmGroup(this, 'xcmPallet'); - } -} - -export class WestmintHelper extends XcmChainHelper { - balance: SubstrateBalanceGroup; - xcm: XcmGroup; - assets: AssetsGroup; - xTokens: XTokensGroup; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? WestmintHelper); - - this.balance = new SubstrateBalanceGroup(this); - this.xcm = new XcmGroup(this, 'polkadotXcm'); - this.assets = new AssetsGroup(this); - this.xTokens = new XTokensGroup(this); - } -} - -export class MoonbeamHelper extends XcmChainHelper { - balance: EthereumBalanceGroup; - assetManager: MoonbeamAssetManagerGroup; - assets: AssetsGroup; - xTokens: XTokensGroup; - democracy: MoonbeamDemocracyGroup; - collective: { - council: MoonbeamCollectiveGroup, - techCommittee: MoonbeamCollectiveGroup, - }; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? MoonbeamHelper); - - this.balance = new EthereumBalanceGroup(this); - this.assetManager = new MoonbeamAssetManagerGroup(this); - this.assets = new AssetsGroup(this); - this.xTokens = new XTokensGroup(this); - this.democracy = new MoonbeamDemocracyGroup(this, options); - this.collective = { - council: new MoonbeamCollectiveGroup(this, 'councilCollective'), - techCommittee: new MoonbeamCollectiveGroup(this, 'techCommitteeCollective'), - }; - } -} - -export class AstarHelper extends XcmChainHelper { - balance: SubstrateBalanceGroup; - assets: AssetsGroup; - xcm: XcmGroup; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? AstarHelper); - - this.balance = new SubstrateBalanceGroup(this); - this.assets = new AssetsGroup(this); - this.xcm = new XcmGroup(this, 'polkadotXcm'); - } - - getSudo() { - // eslint-disable-next-line @typescript-eslint/naming-convention - const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; - } -} - -export class AcalaHelper extends XcmChainHelper { - balance: SubstrateBalanceGroup; - assetRegistry: AcalaAssetRegistryGroup; - xTokens: XTokensGroup; - tokens: TokensGroup; - xcm: XcmGroup; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? 
AcalaHelper); - - this.balance = new SubstrateBalanceGroup(this); - this.assetRegistry = new AcalaAssetRegistryGroup(this); - this.xTokens = new XTokensGroup(this); - this.tokens = new TokensGroup(this); - this.xcm = new XcmGroup(this, 'polkadotXcm'); - } - - getSudo() { - // eslint-disable-next-line @typescript-eslint/naming-convention - const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; - } -} - -export class PolkadexHelper extends XcmChainHelper { - assets: AssetsGroup; - balance: SubstrateBalanceGroup; - xTokens: XTokensGroup; - xcm: XcmGroup; - xcmHelper: PolkadexXcmHelperGroup; - - constructor(logger?: ILogger, options: { [key: string]: any } = {}) { - super(logger, options.helperBase ?? PolkadexHelper); - - this.assets = new AssetsGroup(this); - this.balance = new SubstrateBalanceGroup(this); - this.xTokens = new XTokensGroup(this); - this.xcm = new XcmGroup(this, 'polkadotXcm'); - this.xcmHelper = new PolkadexXcmHelperGroup(this); - } - - getSudo() { - // eslint-disable-next-line @typescript-eslint/naming-convention - const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; - } } // eslint-disable-next-line @typescript-eslint/naming-convention @@ -4091,80 +3866,6 @@ function ScheduledUniqueHelper(Base: T) { }; } -// eslint-disable-next-line @typescript-eslint/naming-convention -function SudoHelper(Base: T) { - return class extends Base { - constructor(...args: any[]) { - super(...args); - } - - async executeExtrinsic( - sender: IKeyringPair, - extrinsic: string, - params: any[], - expectSuccess?: boolean, - options: Partial | null = null, - ): Promise { - const call = this.constructApiCall(extrinsic, params); - const result = await super.executeExtrinsic( - sender, - 'api.tx.sudo.sudo', - [call], - expectSuccess, - options, - ); - - if(result.status === 'Fail') return result; - - const data = (result.result.events.find(x => x.event.section == 'sudo' && x.event.method == 'Sudid')?.event.data as any).sudoResult; - if(data.isErr) { - if(data.asErr.isModule) { - const error = (result.result.events[1].event.data as any).sudoResult.asErr.asModule; - const metaError = super.getApi()?.registry.findMetaError(error); - throw new Error(`${metaError.section}.${metaError.name}`); - } else if(data.asErr.isToken) { - throw new Error(`Token: ${data.asErr.asToken}`); - } - // May be [object Object] in case of unhandled non-unit enum - throw new Error(`Misc: ${data.asErr.toHuman()}`); - } - return result; - } - async executeExtrinsicUncheckedWeight( - sender: IKeyringPair, - extrinsic: string, - params: any[], - expectSuccess?: boolean, - options: Partial | null = null, - ): Promise { - const call = this.constructApiCall(extrinsic, params); - const result = await super.executeExtrinsic( - sender, - 'api.tx.sudo.sudoUncheckedWeight', - [call, {refTime: 0, proofSize: 0}], - expectSuccess, - options, - ); - - if(result.status === 'Fail') return result; - - const data = (result.result.events.find(x => x.event.section == 'sudo' && x.event.method == 'Sudid')?.event.data as any).sudoResult; - if(data.isErr) { - if(data.asErr.isModule) { - const error = (result.result.events[1].event.data as any).sudoResult.asErr.asModule; - const metaError = super.getApi()?.registry.findMetaError(error); - throw new Error(`${metaError.section}.${metaError.name}`); - } else if(data.asErr.isToken) { - throw new Error(`Token: ${data.asErr.asToken}`); - } - // May be [object Object] in case of unhandled non-unit enum - throw new Error(`Misc: 
${data.asErr.toHuman()}`); - } - return result; - } - }; -} - export class UniqueBaseCollection { helper: UniqueHelper; collectionId: number; @@ -4289,13 +3990,8 @@ export class UniqueBaseCollection { const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); return new UniqueBaseCollection(this.collectionId, scheduledHelper); } - - getSudo() { - return new UniqueBaseCollection(this.collectionId, this.helper.getSudo()); - } } - export class UniqueNFTCollection extends UniqueBaseCollection { getTokenObject(tokenId: number) { return new UniqueNFToken(tokenId, this); @@ -4403,13 +4099,8 @@ export class UniqueNFTCollection extends UniqueBaseCollection { const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); return new UniqueNFTCollection(this.collectionId, scheduledHelper); } - - getSudo() { - return new UniqueNFTCollection(this.collectionId, this.helper.getSudo()); - } } - export class UniqueRFTCollection extends UniqueBaseCollection { getTokenObject(tokenId: number) { return new UniqueRFToken(tokenId, this); @@ -4529,13 +4220,8 @@ export class UniqueRFTCollection extends UniqueBaseCollection { const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); return new UniqueRFTCollection(this.collectionId, scheduledHelper); } - - getSudo() { - return new UniqueRFTCollection(this.collectionId, this.helper.getSudo()); - } } - export class UniqueFTCollection extends UniqueBaseCollection { async getBalance(addressObj: ICrossAccountId) { return await this.helper.ft.getBalance(this.collectionId, addressObj); @@ -4596,13 +4282,8 @@ export class UniqueFTCollection extends UniqueBaseCollection { const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); return new UniqueFTCollection(this.collectionId, scheduledHelper); } - - getSudo() { - return new UniqueFTCollection(this.collectionId, this.helper.getSudo()); - } } - export class UniqueBaseToken { collection: UniqueNFTCollection | UniqueRFTCollection; collectionId: number; @@ -4657,13 +4338,8 @@ export class UniqueBaseToken { const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); return new UniqueBaseToken(this.tokenId, scheduledCollection); } - - getSudo() { - return new UniqueBaseToken(this.tokenId, this.collection.getSudo()); - } } - export class UniqueNFToken extends UniqueBaseToken { collection: UniqueNFTCollection; @@ -4735,10 +4411,6 @@ export class UniqueNFToken extends UniqueBaseToken { const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); return new UniqueNFToken(this.tokenId, scheduledCollection); } - - getSudo() { - return new UniqueNFToken(this.tokenId, this.collection.getSudo()); - } } export class UniqueRFToken extends UniqueBaseToken { @@ -4824,8 +4496,4 @@ export class UniqueRFToken extends UniqueBaseToken { const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); return new UniqueRFToken(this.tokenId, scheduledCollection); } - - getSudo() { - return new UniqueRFToken(this.tokenId, this.collection.getSudo()); - } } diff --git a/tests/src/util/playgrounds/unique.xcm.ts b/tests/src/util/playgrounds/unique.xcm.ts new file mode 100644 index 0000000000..376f6a86d2 --- /dev/null +++ b/tests/src/util/playgrounds/unique.xcm.ts @@ -0,0 +1,208 @@ + +import {ApiPromise, WsProvider} from '@polkadot/api'; +import {AssetsGroup, ChainHelperBase, EthereumBalanceGroup, HelperGroup, SubstrateBalanceGroup, 
TokensGroup, UniqueHelper, XTokensGroup, XcmGroup} from './unique'; +import {ILogger, TSigner} from './types'; +import {SudoHelper} from './unique.dev'; +import {AcalaAssetMetadata, DemocracyStandardAccountVote, MoonbeamAssetInfo} from './types.xcm'; + +export class XcmChainHelper extends ChainHelperBase { + async connect(wsEndpoint: string, _listeners?: any): Promise { + const wsProvider = new WsProvider(wsEndpoint); + this.api = new ApiPromise({ + provider: wsProvider, + }); + await this.api.isReadyOrError; + this.network = await UniqueHelper.detectNetwork(this.api); + } +} + +class AcalaAssetRegistryGroup extends HelperGroup { + async registerForeignAsset(signer: TSigner, destination: any, metadata: AcalaAssetMetadata) { + await this.helper.executeExtrinsic(signer, 'api.tx.assetRegistry.registerForeignAsset', [destination, metadata], true); + } +} + +class MoonbeamAssetManagerGroup extends HelperGroup { + makeRegisterForeignAssetProposal(assetInfo: MoonbeamAssetInfo) { + const apiPrefix = 'api.tx.assetManager.'; + + const registerTx = this.helper.constructApiCall( + apiPrefix + 'registerForeignAsset', + [assetInfo.location, assetInfo.metadata, assetInfo.existentialDeposit, assetInfo.isSufficient], + ); + + const setUnitsTx = this.helper.constructApiCall( + apiPrefix + 'setAssetUnitsPerSecond', + [assetInfo.location, assetInfo.unitsPerSecond, assetInfo.numAssetsWeightHint], + ); + + const batchCall = this.helper.getApi().tx.utility.batchAll([registerTx, setUnitsTx]); + const encodedProposal = batchCall?.method.toHex() || ''; + return encodedProposal; + } + + async assetTypeId(location: any) { + return await this.helper.callRpc('api.query.assetManager.assetTypeId', [location]); + } +} + +class MoonbeamDemocracyGroup extends HelperGroup { + notePreimagePallet: string; + + constructor(helper: MoonbeamHelper, options: { [key: string]: any } = {}) { + super(helper); + this.notePreimagePallet = options.notePreimagePallet; + } + + async notePreimage(signer: TSigner, encodedProposal: string) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.notePreimagePallet}.notePreimage`, [encodedProposal], true); + } + + externalProposeMajority(proposal: any) { + return this.helper.constructApiCall('api.tx.democracy.externalProposeMajority', [proposal]); + } + + fastTrack(proposalHash: string, votingPeriod: number, delayPeriod: number) { + return this.helper.constructApiCall('api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); + } + + async referendumVote(signer: TSigner, referendumIndex: number, accountVote: DemocracyStandardAccountVote) { + await this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, {Standard: accountVote}], true); + } +} + +class MoonbeamCollectiveGroup extends HelperGroup { + collective: string; + + constructor(helper: MoonbeamHelper, collective: string) { + super(helper); + + this.collective = collective; + } + + async propose(signer: TSigner, threshold: number, proposalHash: string, lengthBound: number) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.propose`, [threshold, proposalHash, lengthBound], true); + } + + async vote(signer: TSigner, proposalHash: string, proposalIndex: number, approve: boolean) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [proposalHash, proposalIndex, approve], true); + } + + async close(signer: TSigner, proposalHash: string, proposalIndex: number, weightBound: any, lengthBound: number) { + await this.helper.executeExtrinsic(signer, 
`api.tx.${this.collective}.close`, [proposalHash, proposalIndex, weightBound, lengthBound], true); + } + + async proposalCount() { + return Number(await this.helper.callRpc(`api.query.${this.collective}.proposalCount`, [])); + } +} + +class PolkadexXcmHelperGroup extends HelperGroup { + async whitelistToken(signer: TSigner, assetId: any) { + await this.helper.executeExtrinsic(signer, 'api.tx.xcmHelper.whitelistToken', [assetId], true); + } +} +export class RelayHelper extends XcmChainHelper { + balance: SubstrateBalanceGroup; + xcm: XcmGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? RelayHelper); + + this.balance = new SubstrateBalanceGroup(this); + this.xcm = new XcmGroup(this, 'xcmPallet'); + } +} + +export class WestmintHelper extends XcmChainHelper { + balance: SubstrateBalanceGroup; + xcm: XcmGroup; + assets: AssetsGroup; + xTokens: XTokensGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? WestmintHelper); + + this.balance = new SubstrateBalanceGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + this.assets = new AssetsGroup(this); + this.xTokens = new XTokensGroup(this); + } +} + +export class MoonbeamHelper extends XcmChainHelper { + balance: EthereumBalanceGroup; + assetManager: MoonbeamAssetManagerGroup; + assets: AssetsGroup; + xTokens: XTokensGroup; + democracy: MoonbeamDemocracyGroup; + collective: { + council: MoonbeamCollectiveGroup, + techCommittee: MoonbeamCollectiveGroup, + }; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? MoonbeamHelper); + + this.balance = new EthereumBalanceGroup(this); + this.assetManager = new MoonbeamAssetManagerGroup(this); + this.assets = new AssetsGroup(this); + this.xTokens = new XTokensGroup(this); + this.democracy = new MoonbeamDemocracyGroup(this, options); + this.collective = { + council: new MoonbeamCollectiveGroup(this, 'councilCollective'), + techCommittee: new MoonbeamCollectiveGroup(this, 'techCommitteeCollective'), + }; + } +} + +export class AstarHelper extends XcmChainHelper { + balance: SubstrateBalanceGroup; + assets: AssetsGroup; + xcm: XcmGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? AstarHelper); + + this.balance = new SubstrateBalanceGroup(this); + this.assets = new AssetsGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + } +} + +export class AcalaHelper extends XcmChainHelper { + balance: SubstrateBalanceGroup; + assetRegistry: AcalaAssetRegistryGroup; + xTokens: XTokensGroup; + tokens: TokensGroup; + xcm: XcmGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? AcalaHelper); + + this.balance = new SubstrateBalanceGroup(this); + this.assetRegistry = new AcalaAssetRegistryGroup(this); + this.xTokens = new XTokensGroup(this); + this.tokens = new TokensGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + } +} + +export class PolkadexHelper extends XcmChainHelper { + assets: AssetsGroup; + balance: SubstrateBalanceGroup; + xTokens: XTokensGroup; + xcm: XcmGroup; + xcmHelper: PolkadexXcmHelperGroup; + + constructor(logger?: ILogger, options: { [key: string]: any } = {}) { + super(logger, options.helperBase ?? 
PolkadexHelper); + + this.assets = new AssetsGroup(this); + this.balance = new SubstrateBalanceGroup(this); + this.xTokens = new XTokensGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + this.xcmHelper = new PolkadexXcmHelperGroup(this); + } +} + From 04e6d946dd25dbb02df6caeccc922662b8127e05 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 14 Sep 2023 11:02:36 +0000 Subject: [PATCH 020/143] tests: move scheduler to dev, move governance to separate file --- tests/src/governance/util.ts | 3 +- tests/src/util/playgrounds/types.ts | 7 - tests/src/util/playgrounds/types.xcm.ts | 7 + tests/src/util/playgrounds/unique.dev.ts | 210 +++- .../src/util/playgrounds/unique.governance.ts | 531 +++++++++ tests/src/util/playgrounds/unique.ts | 1008 +---------------- tests/src/util/playgrounds/unique.xcm.ts | 173 ++- 7 files changed, 916 insertions(+), 1023 deletions(-) create mode 100644 tests/src/util/playgrounds/unique.governance.ts diff --git a/tests/src/governance/util.ts b/tests/src/governance/util.ts index 870741a304..3b213445bb 100644 --- a/tests/src/governance/util.ts +++ b/tests/src/governance/util.ts @@ -2,6 +2,7 @@ import {IKeyringPair} from '@polkadot/types/types'; import {xxhashAsHex} from '@polkadot/util-crypto'; import {usingPlaygrounds, expect} from '../util'; import {UniqueHelper} from '../util/playgrounds/unique'; +import {DevUniqueHelper} from '../util/playgrounds/unique.dev'; export const democracyLaunchPeriod = 35; export const democracyVotingPeriod = 35; @@ -203,7 +204,7 @@ export async function hardResetGovScheduler(sudoer: IKeyringPair) { }); } -export async function voteUnanimouslyInFellowship(helper: UniqueHelper, fellows: IKeyringPair[][], minRank: number, referendumIndex: number) { +export async function voteUnanimouslyInFellowship(helper: DevUniqueHelper, fellows: IKeyringPair[][], minRank: number, referendumIndex: number) { for(let rank = minRank; rank < fellowshipRankLimit; rank++) { for(const member of fellows[rank]) { await helper.fellowship.collective.vote(member, referendumIndex, true); diff --git a/tests/src/util/playgrounds/types.ts b/tests/src/util/playgrounds/types.ts index 26ac76cd92..f2948a81f6 100644 --- a/tests/src/util/playgrounds/types.ts +++ b/tests/src/util/playgrounds/types.ts @@ -216,13 +216,6 @@ export interface ISchedulerOptions { }, } -export interface IForeignAssetMetadata { - name?: number | Uint8Array, - symbol?: string, - decimals?: number, - minimalBalance?: bigint, -} - export interface DemocracySplitAccount { aye: bigint, nay: bigint, diff --git a/tests/src/util/playgrounds/types.xcm.ts b/tests/src/util/playgrounds/types.xcm.ts index 5ac5692fc8..161bc78e61 100644 --- a/tests/src/util/playgrounds/types.xcm.ts +++ b/tests/src/util/playgrounds/types.xcm.ts @@ -26,4 +26,11 @@ export interface DemocracyStandardAccountVote { aye: boolean, conviction: number, }, +} + +export interface IForeignAssetMetadata { + name?: number | Uint8Array, + symbol?: string, + decimals?: number, + minimalBalance?: bigint, } \ No newline at end of file diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 63bc6f5a3a..59fc41641c 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -3,17 +3,18 @@ import {stringToU8a} from '@polkadot/util'; import {blake2AsHex, encodeAddress, mnemonicGenerate} from '@polkadot/util-crypto'; -import {UniqueHelper, ChainHelperBase, ChainHelperBaseConstructor} from './unique'; +import {UniqueHelper, ChainHelperBase, 
ChainHelperBaseConstructor, HelperGroup, UniqueHelperConstructor} from './unique'; import {ApiPromise, Keyring, WsProvider} from '@polkadot/api'; import * as defs from '../../interfaces/definitions'; import {IKeyringPair} from '@polkadot/types/types'; import {EventRecord} from '@polkadot/types/interfaces'; -import {ICrossAccountId, IPovInfo, ITransactionResult, TSigner} from './types'; +import {ICrossAccountId, ILogger, IPovInfo, ISchedulerOptions, ITransactionResult, TSigner} from './types'; import {FrameSystemEventRecord, XcmV2TraitsError} from '@polkadot/types/lookup'; import {SignerOptions, VoidFn} from '@polkadot/api/types'; import {Pallets} from '..'; import {spawnSync} from 'child_process'; -import {AcalaHelper, AstarHelper, MoonbeamHelper, PolkadexHelper, RelayHelper, WestmintHelper} from './unique.xcm'; +import {AcalaHelper, AstarHelper, MoonbeamHelper, PolkadexHelper, RelayHelper, WestmintHelper, ForeignAssetsGroup, XcmGroup, XTokensGroup, TokensGroup} from './unique.xcm'; +import {CollectiveGroup, CollectiveMembershipGroup, DemocracyGroup, ICollectiveGroup, IFellowshipGroup, RankedCollectiveGroup, ReferendaGroup} from './unique.governance'; export class SilentLogger { log(_msg: any, _level: any): void { } @@ -335,6 +336,118 @@ export function SudoHelper(Base: T) { }; } +class SchedulerGroup extends HelperGroup { + constructor(helper: UniqueHelper) { + super(helper); + } + + cancelScheduled(signer: TSigner, scheduledId: string) { + return this.helper.executeExtrinsic( + signer, + 'api.tx.scheduler.cancelNamed', + [scheduledId], + true, + ); + } + + changePriority(signer: TSigner, scheduledId: string, priority: number) { + return this.helper.executeExtrinsic( + signer, + 'api.tx.scheduler.changeNamedPriority', + [scheduledId, priority], + true, + ); + } + + scheduleAt( + executionBlockNumber: number, + options: ISchedulerOptions = {}, + ) { + return this.schedule('schedule', executionBlockNumber, options); + } + + scheduleAfter( + blocksBeforeExecution: number, + options: ISchedulerOptions = {}, + ) { + return this.schedule('scheduleAfter', blocksBeforeExecution, options); + } + + schedule( + scheduleFn: 'schedule' | 'scheduleAfter', + blocksNum: number, + options: ISchedulerOptions = {}, + ) { + // eslint-disable-next-line @typescript-eslint/naming-convention + const ScheduledHelperType = ScheduledUniqueHelper(this.helper.helperBase); + return this.helper.clone(ScheduledHelperType, { + scheduleFn, + blocksNum, + options, + }) as T; + } +} + +class CollatorSelectionGroup extends HelperGroup { + //todo:collator documentation + addInvulnerable(signer: TSigner, address: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.addInvulnerable', [address]); + } + + removeInvulnerable(signer: TSigner, address: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.removeInvulnerable', [address]); + } + + async getInvulnerables(): Promise { + return (await this.helper.callRpc('api.query.collatorSelection.invulnerables')).map((x: any) => x.toHuman()); + } + + /** and also total max invulnerables */ + maxCollators(): number { + return (this.helper.getApi().consts.configuration.defaultCollatorSelectionMaxCollators.toJSON() as number); + } + + async getDesiredCollators(): Promise { + return (await this.helper.callRpc('api.query.configuration.collatorSelectionDesiredCollatorsOverride')).toNumber(); + } + + setLicenseBond(signer: TSigner, amount: bigint) { + return this.helper.executeExtrinsic(signer, 
'api.tx.configuration.setCollatorSelectionLicenseBond', [amount]); + } + + async getLicenseBond(): Promise { + return (await this.helper.callRpc('api.query.configuration.collatorSelectionLicenseBondOverride')).toBigInt(); + } + + obtainLicense(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.getLicense', []); + } + + releaseLicense(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.releaseLicense', []); + } + + forceReleaseLicense(signer: TSigner, released: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.forceReleaseLicense', [released]); + } + + async hasLicense(address: string): Promise { + return (await this.helper.callRpc('api.query.collatorSelection.licenseDepositOf', [address])).toBigInt(); + } + + onboard(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.onboard', []); + } + + offboard(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.offboard', []); + } + + async getCandidates(): Promise { + return (await this.helper.callRpc('api.query.collatorSelection.candidates')).map((x: any) => x.toHuman()); + } +} + export class DevUniqueHelper extends UniqueHelper { /** * Arrange methods for tests @@ -344,6 +457,16 @@ export class DevUniqueHelper extends UniqueHelper { admin: AdminGroup; session: SessionGroup; testUtils: TestUtilGroup; + foreignAssets: ForeignAssetsGroup; + xcm: XcmGroup; + xTokens: XTokensGroup; + tokens: TokensGroup; + scheduler: SchedulerGroup; + collatorSelection: CollatorSelectionGroup; + council: ICollectiveGroup; + technicalCommittee: ICollectiveGroup; + fellowship: IFellowshipGroup; + democracy: DemocracyGroup; constructor(logger: { log: (msg: any, level: any) => void, level: any }, options: {[key: string]: any} = {}) { options.helperBase = options.helperBase ?? 
DevUniqueHelper; @@ -354,6 +477,25 @@ export class DevUniqueHelper extends UniqueHelper { this.admin = new AdminGroup(this); this.testUtils = new TestUtilGroup(this); this.session = new SessionGroup(this); + this.foreignAssets = new ForeignAssetsGroup(this); + this.xcm = new XcmGroup(this, 'polkadotXcm'); + this.xTokens = new XTokensGroup(this); + this.tokens = new TokensGroup(this); + this.scheduler = new SchedulerGroup(this); + this.collatorSelection = new CollatorSelectionGroup(this); + this.council = { + collective: new CollectiveGroup(this, 'council'), + membership: new CollectiveMembershipGroup(this, 'councilMembership'), + }; + this.technicalCommittee = { + collective: new CollectiveGroup(this, 'technicalCommittee'), + membership: new CollectiveMembershipGroup(this, 'technicalCommitteeMembership'), + }; + this.fellowship = { + collective: new RankedCollectiveGroup(this, 'fellowshipCollective'), + referenda: new ReferendaGroup(this, 'fellowshipReferenda'), + }; + this.democracy = new DemocracyGroup(this); } async connect(wsEndpoint: string, _listeners?: any): Promise { @@ -401,7 +543,7 @@ export class DevUniqueHelper extends UniqueHelper { this.network = await UniqueHelper.detectNetwork(this.api); this.wsEndpoint = wsEndpoint; } - getSudo() { + getSudo() { // eslint-disable-next-line @typescript-eslint/naming-convention const SudoHelperType = SudoHelper(this.helperBase); return this.clone(SudoHelperType) as T; @@ -1308,3 +1450,63 @@ class AdminGroup { })); } } + +// eslint-disable-next-line @typescript-eslint/naming-convention +function ScheduledUniqueHelper(Base: T) { + return class extends Base { + scheduleFn: 'schedule' | 'scheduleAfter'; + blocksNum: number; + options: ISchedulerOptions; + + constructor(...args: any[]) { + const logger = args[0] as ILogger; + const options = args[1] as { + scheduleFn: 'schedule' | 'scheduleAfter', + blocksNum: number, + options: ISchedulerOptions + }; + + super(logger); + + this.scheduleFn = options.scheduleFn; + this.blocksNum = options.blocksNum; + this.options = options.options; + } + + executeExtrinsic(sender: IKeyringPair, scheduledExtrinsic: string, scheduledParams: any[], expectSuccess?: boolean): Promise { + const scheduledTx = this.constructApiCall(scheduledExtrinsic, scheduledParams); + + const mandatorySchedArgs = [ + this.blocksNum, + this.options.periodic ? [this.options.periodic.period, this.options.periodic.repetitions] : null, + this.options.priority ?? null, + scheduledTx, + ]; + + let schedArgs; + let scheduleFn; + + if(this.options.scheduledId) { + schedArgs = [this.options.scheduledId!, ...mandatorySchedArgs]; + + if(this.scheduleFn == 'schedule') { + scheduleFn = 'scheduleNamed'; + } else if(this.scheduleFn == 'scheduleAfter') { + scheduleFn = 'scheduleNamedAfter'; + } + } else { + schedArgs = mandatorySchedArgs; + scheduleFn = this.scheduleFn; + } + + const extrinsic = 'api.tx.scheduler.' 
+ scheduleFn; + + return super.executeExtrinsic( + sender, + extrinsic as any, + schedArgs, + expectSuccess, + ); + } + }; +} \ No newline at end of file diff --git a/tests/src/util/playgrounds/unique.governance.ts b/tests/src/util/playgrounds/unique.governance.ts new file mode 100644 index 0000000000..b3969069fb --- /dev/null +++ b/tests/src/util/playgrounds/unique.governance.ts @@ -0,0 +1,531 @@ +import {blake2AsHex} from '@polkadot/util-crypto'; +import {PalletDemocracyConviction} from '@polkadot/types/lookup'; +import {IPhasicEvent, TSigner} from './types'; +import {HelperGroup, UniqueHelper} from './unique'; + +export class CollectiveGroup extends HelperGroup { + /** + * Pallet name to make an API call to. Examples: 'council', 'technicalCommittee' + */ + private collective: string; + + constructor(helper: UniqueHelper, collective: string) { + super(helper); + this.collective = collective; + } + + /** + * Check the result of a proposal execution for the success of the underlying proposed extrinsic. + * @param events events of the proposal execution + * @returns proposal hash + */ + private checkExecutedEvent(events: IPhasicEvent[]): string { + const executionEvents = events.filter(x => + x.event.section === this.collective && (x.event.method === 'Executed' || x.event.method === 'MemberExecuted')); + + if(executionEvents.length != 1) { + if(events.filter(x => x.event.section === this.collective && x.event.method === 'Disapproved').length > 0) + throw new Error(`Disapproved by ${this.collective}`); + else + throw new Error(`Expected one 'Executed' or 'MemberExecuted' event for ${this.collective}`); + } + + const result = (executionEvents[0].event.data as any).result; + + if(result.isErr) { + if(result.asErr.isModule) { + const error = result.asErr.asModule; + const metaError = this.helper.getApi()?.registry.findMetaError(error); + throw new Error(`Proposal execution failed with ${metaError.section}.${metaError.name}`); + } else { + throw new Error('Proposal execution failed with ' + result.asErr.toHuman()); + } + } + + return (executionEvents[0].event.data as any).proposalHash; + } + + /** + * Returns an array of members' addresses. + */ + async getMembers() { + return (await this.helper.callRpc(`api.query.${this.collective}.members`, [])).toHuman(); + } + + /** + * Returns the optional address of the prime member of the collective. + */ + async getPrimeMember() { + return (await this.helper.callRpc(`api.query.${this.collective}.prime`, [])).toHuman(); + } + + /** + * Returns an array of proposal hashes that are currently active for this collective. + */ + async getProposals() { + return (await this.helper.callRpc(`api.query.${this.collective}.proposals`, [])).toHuman(); + } + + /** + * Returns the call originally encoded under the specified hash. + * @param hash h256-encoded proposal + * @returns the optional call that the proposal hash stands for. + */ + async getProposalCallOf(hash: string) { + return (await this.helper.callRpc(`api.query.${this.collective}.proposalOf`, [hash])).toHuman(); + } + + /** + * Returns the total number of proposals so far. + */ + async getTotalProposalsCount() { + return (await this.helper.callRpc(`api.query.${this.collective}.proposalCount`, [])).toNumber(); + } + + /** + * Creates a new proposal up for voting. If the threshold is set to 1, the proposal will be executed immediately. 
+ * @param signer keyring of the proposer + * @param proposal constructed call to be executed if the proposal is successful + * @param voteThreshold minimal number of votes for the proposal to be verified and executed + * @param lengthBound byte length of the encoded call + * @returns promise of extrinsic execution and its result + */ + async propose(signer: TSigner, proposal: any, voteThreshold: number, lengthBound = 10000) { + return await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.propose`, [voteThreshold, proposal, lengthBound]); + } + + /** + * Casts a vote to either approve or reject a proposal. + * @param signer keyring of the voter + * @param proposalHash hash of the proposal to be voted for + * @param proposalIndex absolute index of the proposal used for absolutely nothing but throwing pointless errors + * @param approve aye or nay + * @returns promise of extrinsic execution and its result + */ + vote(signer: TSigner, proposalHash: string, proposalIndex: number, approve: boolean) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [proposalHash, proposalIndex, approve]); + } + + /** + * Executes a call immediately as a member of the collective. Needed for the Member origin. + * @param signer keyring of the executor member + * @param proposal constructed call to be executed by the member + * @param lengthBound byte length of the encoded call + * @returns promise of extrinsic execution + */ + async execute(signer: TSigner, proposal: any, lengthBound = 10000) { + const result = await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.execute`, [proposal, lengthBound]); + this.checkExecutedEvent(result.result.events); + return result; + } + + /** + * Attempt to close and execute a proposal. Note that there must already be enough votes to meet the threshold set when proposing. + * @param signer keyring of the executor. Can be absolutely anyone. + * @param proposalHash hash of the proposal to close + * @param proposalIndex index of the proposal generated on its creation + * @param weightBound weight of the proposed call. Can be obtained by calling `paymentInfo()` on the call. + * @param lengthBound byte length of the encoded call + * @returns promise of extrinsic execution and its result + */ + async close( + signer: TSigner, + proposalHash: string, + proposalIndex: number, + weightBound: [number, number] | any = [20_000_000_000, 1000_000], + lengthBound = 10_000, + ) { + const result = await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.close`, [ + proposalHash, + proposalIndex, + weightBound, + lengthBound, + ]); + this.checkExecutedEvent(result.result.events); + return result; + } + + /** + * Shut down a proposal, regardless of its current state. + * @param signer keyring of the disapprover. Must be root + * @param proposalHash hash of the proposal to close + * @returns promise of extrinsic execution and its result + */ + disapproveProposal(signer: TSigner, proposalHash: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.disapproveProposal`, [proposalHash]); + } +} + +export class CollectiveMembershipGroup extends HelperGroup { + /** + * Pallet name to make an API call to. 
Examples: 'councilMembership', 'technicalCommitteeMembership' + */ + private membership: string; + + constructor(helper: UniqueHelper, membership: string) { + super(helper); + this.membership = membership; + } + + /** + * Returns an array of members' addresses according to the membership pallet's perception. + * Note that it does not recognize the original pallet's members set with `setMembers()`. + */ + async getMembers() { + return (await this.helper.callRpc(`api.query.${this.membership}.members`, [])).toHuman(); + } + + /** + * Returns the optional address of the prime member of the collective. + */ + async getPrimeMember() { + return (await this.helper.callRpc(`api.query.${this.membership}.prime`, [])).toHuman(); + } + + /** + * Add a member to the collective. + * @param signer keyring of the setter. Must be root + * @param member address of the member to add + * @returns promise of extrinsic execution and its result + */ + addMember(signer: TSigner, member: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.addMember`, [member]); + } + + addMemberCall(member: string) { + return this.helper.constructApiCall(`api.tx.${this.membership}.addMember`, [member]); + } + + /** + * Remove a member from the collective. + * @param signer keyring of the setter. Must be root + * @param member address of the member to remove + * @returns promise of extrinsic execution and its result + */ + removeMember(signer: TSigner, member: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.removeMember`, [member]); + } + + removeMemberCall(member: string) { + return this.helper.constructApiCall(`api.tx.${this.membership}.removeMember`, [member]); + } + + /** + * Set members of the collective to the given list of addresses. + * @param signer keyring of the setter. Must be root (for the direct call, bypassing a public motion) + * @param members addresses of the members to set + * @returns promise of extrinsic execution and its result + */ + resetMembers(signer: TSigner, members: string[]) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.resetMembers`, [members]); + } + + /** + * Set the collective's prime member to the given address. + * @param signer keyring of the setter. Must be root (for the direct call, bypassing a public motion) + * @param prime address of the prime member of the collective + * @returns promise of extrinsic execution and its result + */ + setPrime(signer: TSigner, prime: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.setPrime`, [prime]); + } + + setPrimeCall(member: string) { + return this.helper.constructApiCall(`api.tx.${this.membership}.setPrime`, [member]); + } + + /** + * Remove the collective's prime member. + * @param signer keyring of the setter. Must be root (for the direct call, bypassing a public motion) + * @returns promise of extrinsic execution and its result + */ + clearPrime(signer: TSigner) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.clearPrime`, []); + } + + clearPrimeCall() { + return this.helper.constructApiCall(`api.tx.${this.membership}.clearPrime`, []); + } +} + +export class RankedCollectiveGroup extends HelperGroup { + /** + * Pallet name to make an API call to. 
Examples: 'FellowshipCollective' + */ + private collective: string; + + constructor(helper: UniqueHelper, collective: string) { + super(helper); + this.collective = collective; + } + + addMember(signer: TSigner, newMember: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.addMember`, [newMember]); + } + + addMemberCall(newMember: string) { + return this.helper.constructApiCall(`api.tx.${this.collective}.addMember`, [newMember]); + } + + removeMember(signer: TSigner, member: string, minRank: number) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.removeMember`, [member, minRank]); + } + + removeMemberCall(newMember: string, minRank: number) { + return this.helper.constructApiCall(`api.tx.${this.collective}.removeMember`, [newMember, minRank]); + } + + promote(signer: TSigner, member: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.promoteMember`, [member]); + } + + promoteCall(member: string) { + return this.helper.constructApiCall(`api.tx.${this.collective}.promoteMember`, [member]); + } + + demote(signer: TSigner, member: string) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.demoteMember`, [member]); + } + + demoteCall(newMember: string) { + return this.helper.constructApiCall(`api.tx.${this.collective}.demoteMember`, [newMember]); + } + + vote(signer: TSigner, pollIndex: number, aye: boolean) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [pollIndex, aye]); + } + + async getMembers() { + return (await this.helper.getApi().query.fellowshipCollective.members.keys()) + .map((key) => key.args[0].toString()); + } + + async getMemberRank(member: string) { + return (await this.helper.callRpc('api.query.fellowshipCollective.members', [member])).toJSON().rank; + } +} + +export class ReferendaGroup extends HelperGroup { + /** + * Pallet name to make an API call to. 
Examples: 'FellowshipReferenda' + */ + private referenda: string; + + constructor(helper: UniqueHelper, referenda: string) { + super(helper); + this.referenda = referenda; + } + + submit( + signer: TSigner, + proposalOrigin: string, + proposal: any, + enactmentMoment: any, + ) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.submit`, [ + {Origins: proposalOrigin}, + proposal, + enactmentMoment, + ]); + } + + placeDecisionDeposit(signer: TSigner, referendumIndex: number) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.placeDecisionDeposit`, [referendumIndex]); + } + + cancel(signer: TSigner, referendumIndex: number) { + return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.cancel`, [referendumIndex]); + } + + cancelCall(referendumIndex: number) { + return this.helper.constructApiCall(`api.tx.${this.referenda}.cancel`, [referendumIndex]); + } + + async referendumInfo(referendumIndex: number) { + return (await this.helper.callRpc(`api.query.${this.referenda}.referendumInfoFor`, [referendumIndex])).toJSON(); + } + + async enactmentEventId(referendumIndex: number) { + const api = await this.helper.getApi(); + + const bytes = api.createType('([u8;8], Text, u32)', ['assembly', 'enactment', referendumIndex]).toU8a(); + return blake2AsHex(bytes, 256); + } +} + +export interface IFellowshipGroup { + collective: RankedCollectiveGroup; + referenda: ReferendaGroup; +} + +export interface ICollectiveGroup { + collective: CollectiveGroup; + membership: CollectiveMembershipGroup; +} + +export class DemocracyGroup extends HelperGroup { + // todo displace proposal into types? + propose(signer: TSigner, call: any, deposit: bigint) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.propose', [{Inline: call.method.toHex()}, deposit]); + } + + proposeWithPreimage(signer: TSigner, preimage: string, deposit: bigint) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.propose', [{Legacy: preimage}, deposit]); + } + + proposeCall(call: any, deposit: bigint) { + return this.helper.constructApiCall('api.tx.democracy.propose', [{Inline: call.method.toHex()}, deposit]); + } + + second(signer: TSigner, proposalIndex: number) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.second', [proposalIndex]); + } + + externalPropose(signer: TSigner, proposalCall: any) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalPropose', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeMajority(signer: TSigner, proposalCall: any) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeMajority', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeDefault(signer: TSigner, proposalCall: any) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeDefault', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeDefaultWithPreimage(signer: TSigner, preimage: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeDefault', [{Legacy: preimage}]); + } + + externalProposeCall(proposalCall: any) { + return this.helper.constructApiCall('api.tx.democracy.externalPropose', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeMajorityCall(proposalCall: any) { + return this.helper.constructApiCall('api.tx.democracy.externalProposeMajority', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeDefaultCall(proposalCall: any) { + return 
this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Inline: proposalCall.method.toHex()}]); + } + + externalProposeDefaultWithPreimageCall(preimage: string) { + return this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Legacy: preimage}]); + } + + // ... and blacklist external proposal hash. + vetoExternal(signer: TSigner, proposalHash: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vetoExternal', [proposalHash]); + } + + vetoExternalCall(proposalHash: string) { + return this.helper.constructApiCall('api.tx.democracy.vetoExternal', [proposalHash]); + } + + blacklist(signer: TSigner, proposalHash: string, referendumIndex: number | null = null) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.blacklist', [proposalHash, referendumIndex]); + } + + blacklistCall(proposalHash: string, referendumIndex: number | null = null) { + return this.helper.constructApiCall('api.tx.democracy.blacklist', [proposalHash, referendumIndex]); + } + + // proposal. CancelProposalOrigin (root or all techcom) + cancelProposal(signer: TSigner, proposalIndex: number) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.cancelProposal', [proposalIndex]); + } + + cancelProposalCall(proposalIndex: number) { + return this.helper.constructApiCall('api.tx.democracy.cancelProposal', [proposalIndex]); + } + + clearPublicProposals(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.clearPublicProposals', []); + } + + fastTrack(signer: TSigner, proposalHash: string, votingPeriod: number, delayPeriod: number) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); + } + + fastTrackCall(proposalHash: string, votingPeriod: number, delayPeriod: number) { + return this.helper.constructApiCall('api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); + } + + // referendum. 
CancellationOrigin (TechCom member) + emergencyCancel(signer: TSigner, referendumIndex: number) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.emergencyCancel', [referendumIndex]); + } + + emergencyCancelCall(referendumIndex: number) { + return this.helper.constructApiCall('api.tx.democracy.emergencyCancel', [referendumIndex]); + } + + vote(signer: TSigner, referendumIndex: number, vote: any) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, vote]); + } + + removeVote(signer: TSigner, referendumIndex: number, targetAccount?: string) { + if(targetAccount) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.removeOtherVote', [targetAccount, referendumIndex]); + } else { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.removeVote', [referendumIndex]); + } + } + + unlock(signer: TSigner, targetAccount: string) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.unlock', [targetAccount]); + } + + delegate(signer: TSigner, toAccount: string, conviction: PalletDemocracyConviction, balance: bigint) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.delegate', [toAccount, conviction, balance]); + } + + undelegate(signer: TSigner) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.undelegate', []); + } + + async referendumInfo(referendumIndex: number) { + return (await this.helper.callRpc('api.query.democracy.referendumInfoOf', [referendumIndex])).toJSON(); + } + + async publicProposals() { + return (await this.helper.callRpc('api.query.democracy.publicProps', [])).toJSON(); + } + + async findPublicProposal(proposalIndex: number) { + const proposalInfo = (await this.publicProposals()).find((proposalInfo: any[]) => proposalInfo[0] == proposalIndex); + + return proposalInfo ? proposalInfo[1] : null; + } + + async expectPublicProposal(proposalIndex: number) { + const proposal = await this.findPublicProposal(proposalIndex); + + if(proposal) { + return proposal; + } else { + throw Error(`Proposal #${proposalIndex} is expected to exist`); + } + } + + async getExternalProposal() { + return (await this.helper.callRpc('api.query.democracy.nextExternal', [])); + } + + async expectExternalProposal() { + const proposal = await this.getExternalProposal(); + + if(proposal) { + return proposal; + } else { + throw Error('An external proposal is expected to exist'); + } + } + + /* setMetadata? */ + + /* todo? 
+ referendumVote(signer: TSigner, referendumIndex: number, accountVote: DemocracyStandardAccountVote) { + return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, {Standard: accountVote}], true); + }*/ +} diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index 19bb7cf5fd..24bfc0d8d6 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -39,13 +39,11 @@ import { TSigner, TSubstrateAccount, TNetworks, - IForeignAssetMetadata, IEthCrossAccountId, - IPhasicEvent, } from './types'; import {RuntimeDispatchInfo} from '@polkadot/types/interfaces'; import type {Vec} from '@polkadot/types-codec'; -import {FrameSystemEventRecord, PalletDemocracyConviction} from '@polkadot/types/lookup'; +import {FrameSystemEventRecord} from '@polkadot/types/lookup'; export class CrossAccountId { Substrate!: TSubstrateAccount; @@ -2757,6 +2755,7 @@ class AddressGroup extends HelperGroup { } } + class StakingGroup extends HelperGroup { /** * Stake tokens for App Promotion @@ -2864,644 +2863,6 @@ class StakingGroup extends HelperGroup { } } -class SchedulerGroup extends HelperGroup { - constructor(helper: UniqueHelper) { - super(helper); - } - - cancelScheduled(signer: TSigner, scheduledId: string) { - return this.helper.executeExtrinsic( - signer, - 'api.tx.scheduler.cancelNamed', - [scheduledId], - true, - ); - } - - changePriority(signer: TSigner, scheduledId: string, priority: number) { - return this.helper.executeExtrinsic( - signer, - 'api.tx.scheduler.changeNamedPriority', - [scheduledId, priority], - true, - ); - } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - return this.schedule('schedule', executionBlockNumber, options); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - return this.schedule('scheduleAfter', blocksBeforeExecution, options); - } - - schedule( - scheduleFn: 'schedule' | 'scheduleAfter', - blocksNum: number, - options: ISchedulerOptions = {}, - ) { - // eslint-disable-next-line @typescript-eslint/naming-convention - const ScheduledHelperType = ScheduledUniqueHelper(this.helper.helperBase); - return this.helper.clone(ScheduledHelperType, { - scheduleFn, - blocksNum, - options, - }) as T; - } -} - -class CollatorSelectionGroup extends HelperGroup { - //todo:collator documentation - addInvulnerable(signer: TSigner, address: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.addInvulnerable', [address]); - } - - removeInvulnerable(signer: TSigner, address: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.removeInvulnerable', [address]); - } - - async getInvulnerables(): Promise { - return (await this.helper.callRpc('api.query.collatorSelection.invulnerables')).map((x: any) => x.toHuman()); - } - - /** and also total max invulnerables */ - maxCollators(): number { - return (this.helper.getApi().consts.configuration.defaultCollatorSelectionMaxCollators.toJSON() as number); - } - - async getDesiredCollators(): Promise { - return (await this.helper.callRpc('api.query.configuration.collatorSelectionDesiredCollatorsOverride')).toNumber(); - } - - setLicenseBond(signer: TSigner, amount: bigint) { - return this.helper.executeExtrinsic(signer, 'api.tx.configuration.setCollatorSelectionLicenseBond', [amount]); - } - - async getLicenseBond(): Promise { - return (await 
this.helper.callRpc('api.query.configuration.collatorSelectionLicenseBondOverride')).toBigInt(); - } - - obtainLicense(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.getLicense', []); - } - - releaseLicense(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.releaseLicense', []); - } - - forceReleaseLicense(signer: TSigner, released: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.forceReleaseLicense', [released]); - } - - async hasLicense(address: string): Promise { - return (await this.helper.callRpc('api.query.collatorSelection.licenseDepositOf', [address])).toBigInt(); - } - - onboard(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.onboard', []); - } - - offboard(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.collatorSelection.offboard', []); - } - - async getCandidates(): Promise { - return (await this.helper.callRpc('api.query.collatorSelection.candidates')).map((x: any) => x.toHuman()); - } -} - -class CollectiveGroup extends HelperGroup { - /** - * Pallet name to make an API call to. Examples: 'council', 'technicalCommittee' - */ - private collective: string; - - constructor(helper: UniqueHelper, collective: string) { - super(helper); - this.collective = collective; - } - - /** - * Check the result of a proposal execution for the success of the underlying proposed extrinsic. - * @param events events of the proposal execution - * @returns proposal hash - */ - private checkExecutedEvent(events: IPhasicEvent[]): string { - const executionEvents = events.filter(x => - x.event.section === this.collective && (x.event.method === 'Executed' || x.event.method === 'MemberExecuted')); - - if(executionEvents.length != 1) { - if(events.filter(x => x.event.section === this.collective && x.event.method === 'Disapproved').length > 0) - throw new Error(`Disapproved by ${this.collective}`); - else - throw new Error(`Expected one 'Executed' or 'MemberExecuted' event for ${this.collective}`); - } - - const result = (executionEvents[0].event.data as any).result; - - if(result.isErr) { - if(result.asErr.isModule) { - const error = result.asErr.asModule; - const metaError = this.helper.getApi()?.registry.findMetaError(error); - throw new Error(`Proposal execution failed with ${metaError.section}.${metaError.name}`); - } else { - throw new Error('Proposal execution failed with ' + result.asErr.toHuman()); - } - } - - return (executionEvents[0].event.data as any).proposalHash; - } - - /** - * Returns an array of members' addresses. - */ - async getMembers() { - return (await this.helper.callRpc(`api.query.${this.collective}.members`, [])).toHuman(); - } - - /** - * Returns the optional address of the prime member of the collective. - */ - async getPrimeMember() { - return (await this.helper.callRpc(`api.query.${this.collective}.prime`, [])).toHuman(); - } - - /** - * Returns an array of proposal hashes that are currently active for this collective. - */ - async getProposals() { - return (await this.helper.callRpc(`api.query.${this.collective}.proposals`, [])).toHuman(); - } - - /** - * Returns the call originally encoded under the specified hash. - * @param hash h256-encoded proposal - * @returns the optional call that the proposal hash stands for. 
- */ - async getProposalCallOf(hash: string) { - return (await this.helper.callRpc(`api.query.${this.collective}.proposalOf`, [hash])).toHuman(); - } - - /** - * Returns the total number of proposals so far. - */ - async getTotalProposalsCount() { - return (await this.helper.callRpc(`api.query.${this.collective}.proposalCount`, [])).toNumber(); - } - - /** - * Creates a new proposal up for voting. If the threshold is set to 1, the proposal will be executed immediately. - * @param signer keyring of the proposer - * @param proposal constructed call to be executed if the proposal is successful - * @param voteThreshold minimal number of votes for the proposal to be verified and executed - * @param lengthBound byte length of the encoded call - * @returns promise of extrinsic execution and its result - */ - async propose(signer: TSigner, proposal: any, voteThreshold: number, lengthBound = 10000) { - return await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.propose`, [voteThreshold, proposal, lengthBound]); - } - - /** - * Casts a vote to either approve or reject a proposal. - * @param signer keyring of the voter - * @param proposalHash hash of the proposal to be voted for - * @param proposalIndex absolute index of the proposal used for absolutely nothing but throwing pointless errors - * @param approve aye or nay - * @returns promise of extrinsic execution and its result - */ - vote(signer: TSigner, proposalHash: string, proposalIndex: number, approve: boolean) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [proposalHash, proposalIndex, approve]); - } - - /** - * Executes a call immediately as a member of the collective. Needed for the Member origin. - * @param signer keyring of the executor member - * @param proposal constructed call to be executed by the member - * @param lengthBound byte length of the encoded call - * @returns promise of extrinsic execution - */ - async execute(signer: TSigner, proposal: any, lengthBound = 10000) { - const result = await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.execute`, [proposal, lengthBound]); - this.checkExecutedEvent(result.result.events); - return result; - } - - /** - * Attempt to close and execute a proposal. Note that there must already be enough votes to meet the threshold set when proposing. - * @param signer keyring of the executor. Can be absolutely anyone. - * @param proposalHash hash of the proposal to close - * @param proposalIndex index of the proposal generated on its creation - * @param weightBound weight of the proposed call. Can be obtained by calling `paymentInfo()` on the call. - * @param lengthBound byte length of the encoded call - * @returns promise of extrinsic execution and its result - */ - async close( - signer: TSigner, - proposalHash: string, - proposalIndex: number, - weightBound: [number, number] | any = [20_000_000_000, 1000_000], - lengthBound = 10_000, - ) { - const result = await this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.close`, [ - proposalHash, - proposalIndex, - weightBound, - lengthBound, - ]); - this.checkExecutedEvent(result.result.events); - return result; - } - - /** - * Shut down a proposal, regardless of its current state. - * @param signer keyring of the disapprover. 
Must be root - * @param proposalHash hash of the proposal to close - * @returns promise of extrinsic execution and its result - */ - disapproveProposal(signer: TSigner, proposalHash: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.disapproveProposal`, [proposalHash]); - } -} - -class CollectiveMembershipGroup extends HelperGroup { - /** - * Pallet name to make an API call to. Examples: 'councilMembership', 'technicalCommitteeMembership' - */ - private membership: string; - - constructor(helper: UniqueHelper, membership: string) { - super(helper); - this.membership = membership; - } - - /** - * Returns an array of members' addresses according to the membership pallet's perception. - * Note that it does not recognize the original pallet's members set with `setMembers()`. - */ - async getMembers() { - return (await this.helper.callRpc(`api.query.${this.membership}.members`, [])).toHuman(); - } - - /** - * Returns the optional address of the prime member of the collective. - */ - async getPrimeMember() { - return (await this.helper.callRpc(`api.query.${this.membership}.prime`, [])).toHuman(); - } - - /** - * Add a member to the collective. - * @param signer keyring of the setter. Must be root - * @param member address of the member to add - * @returns promise of extrinsic execution and its result - */ - addMember(signer: TSigner, member: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.addMember`, [member]); - } - - addMemberCall(member: string) { - return this.helper.constructApiCall(`api.tx.${this.membership}.addMember`, [member]); - } - - /** - * Remove a member from the collective. - * @param signer keyring of the setter. Must be root - * @param member address of the member to remove - * @returns promise of extrinsic execution and its result - */ - removeMember(signer: TSigner, member: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.removeMember`, [member]); - } - - removeMemberCall(member: string) { - return this.helper.constructApiCall(`api.tx.${this.membership}.removeMember`, [member]); - } - - /** - * Set members of the collective to the given list of addresses. - * @param signer keyring of the setter. Must be root (for the direct call, bypassing a public motion) - * @param members addresses of the members to set - * @returns promise of extrinsic execution and its result - */ - resetMembers(signer: TSigner, members: string[]) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.resetMembers`, [members]); - } - - /** - * Set the collective's prime member to the given address. - * @param signer keyring of the setter. Must be root (for the direct call, bypassing a public motion) - * @param prime address of the prime member of the collective - * @returns promise of extrinsic execution and its result - */ - setPrime(signer: TSigner, prime: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.setPrime`, [prime]); - } - - setPrimeCall(member: string) { - return this.helper.constructApiCall(`api.tx.${this.membership}.setPrime`, [member]); - } - - /** - * Remove the collective's prime member. - * @param signer keyring of the setter. 
Must be root (for the direct call, bypassing a public motion) - * @returns promise of extrinsic execution and its result - */ - clearPrime(signer: TSigner) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.membership}.clearPrime`, []); - } - - clearPrimeCall() { - return this.helper.constructApiCall(`api.tx.${this.membership}.clearPrime`, []); - } -} - -class RankedCollectiveGroup extends HelperGroup { - /** - * Pallet name to make an API call to. Examples: 'FellowshipCollective' - */ - private collective: string; - - constructor(helper: UniqueHelper, collective: string) { - super(helper); - this.collective = collective; - } - - addMember(signer: TSigner, newMember: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.addMember`, [newMember]); - } - - addMemberCall(newMember: string) { - return this.helper.constructApiCall(`api.tx.${this.collective}.addMember`, [newMember]); - } - - removeMember(signer: TSigner, member: string, minRank: number) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.removeMember`, [member, minRank]); - } - - removeMemberCall(newMember: string, minRank: number) { - return this.helper.constructApiCall(`api.tx.${this.collective}.removeMember`, [newMember, minRank]); - } - - promote(signer: TSigner, member: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.promoteMember`, [member]); - } - - promoteCall(member: string) { - return this.helper.constructApiCall(`api.tx.${this.collective}.promoteMember`, [member]); - } - - demote(signer: TSigner, member: string) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.demoteMember`, [member]); - } - - demoteCall(newMember: string) { - return this.helper.constructApiCall(`api.tx.${this.collective}.demoteMember`, [newMember]); - } - - vote(signer: TSigner, pollIndex: number, aye: boolean) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.collective}.vote`, [pollIndex, aye]); - } - - async getMembers() { - return (await this.helper.getApi().query.fellowshipCollective.members.keys()) - .map((key) => key.args[0].toString()); - } - - async getMemberRank(member: string) { - return (await this.helper.callRpc('api.query.fellowshipCollective.members', [member])).toJSON().rank; - } -} - -class ReferendaGroup extends HelperGroup { - /** - * Pallet name to make an API call to. 
Examples: 'FellowshipReferenda' - */ - private referenda: string; - - constructor(helper: UniqueHelper, referenda: string) { - super(helper); - this.referenda = referenda; - } - - submit( - signer: TSigner, - proposalOrigin: string, - proposal: any, - enactmentMoment: any, - ) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.submit`, [ - {Origins: proposalOrigin}, - proposal, - enactmentMoment, - ]); - } - - placeDecisionDeposit(signer: TSigner, referendumIndex: number) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.placeDecisionDeposit`, [referendumIndex]); - } - - cancel(signer: TSigner, referendumIndex: number) { - return this.helper.executeExtrinsic(signer, `api.tx.${this.referenda}.cancel`, [referendumIndex]); - } - - cancelCall(referendumIndex: number) { - return this.helper.constructApiCall(`api.tx.${this.referenda}.cancel`, [referendumIndex]); - } - - async referendumInfo(referendumIndex: number) { - return (await this.helper.callRpc(`api.query.${this.referenda}.referendumInfoFor`, [referendumIndex])).toJSON(); - } - - async enactmentEventId(referendumIndex: number) { - const api = await this.helper.getApi(); - - const bytes = api.createType('([u8;8], Text, u32)', ['assembly', 'enactment', referendumIndex]).toU8a(); - return blake2AsHex(bytes, 256); - } -} - -export interface IFellowshipGroup { - collective: RankedCollectiveGroup; - referenda: ReferendaGroup; -} - -export interface ICollectiveGroup { - collective: CollectiveGroup; - membership: CollectiveMembershipGroup; -} - -class DemocracyGroup extends HelperGroup { - // todo displace proposal into types? - propose(signer: TSigner, call: any, deposit: bigint) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.propose', [{Inline: call.method.toHex()}, deposit]); - } - - proposeWithPreimage(signer: TSigner, preimage: string, deposit: bigint) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.propose', [{Legacy: preimage}, deposit]); - } - - proposeCall(call: any, deposit: bigint) { - return this.helper.constructApiCall('api.tx.democracy.propose', [{Inline: call.method.toHex()}, deposit]); - } - - second(signer: TSigner, proposalIndex: number) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.second', [proposalIndex]); - } - - externalPropose(signer: TSigner, proposalCall: any) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalPropose', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeMajority(signer: TSigner, proposalCall: any) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeMajority', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeDefault(signer: TSigner, proposalCall: any) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeDefault', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeDefaultWithPreimage(signer: TSigner, preimage: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.externalProposeDefault', [{Legacy: preimage}]); - } - - externalProposeCall(proposalCall: any) { - return this.helper.constructApiCall('api.tx.democracy.externalPropose', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeMajorityCall(proposalCall: any) { - return this.helper.constructApiCall('api.tx.democracy.externalProposeMajority', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeDefaultCall(proposalCall: any) { - return 
this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Inline: proposalCall.method.toHex()}]); - } - - externalProposeDefaultWithPreimageCall(preimage: string) { - return this.helper.constructApiCall('api.tx.democracy.externalProposeDefault', [{Legacy: preimage}]); - } - - // ... and blacklist external proposal hash. - vetoExternal(signer: TSigner, proposalHash: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vetoExternal', [proposalHash]); - } - - vetoExternalCall(proposalHash: string) { - return this.helper.constructApiCall('api.tx.democracy.vetoExternal', [proposalHash]); - } - - blacklist(signer: TSigner, proposalHash: string, referendumIndex: number | null = null) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.blacklist', [proposalHash, referendumIndex]); - } - - blacklistCall(proposalHash: string, referendumIndex: number | null = null) { - return this.helper.constructApiCall('api.tx.democracy.blacklist', [proposalHash, referendumIndex]); - } - - // proposal. CancelProposalOrigin (root or all techcom) - cancelProposal(signer: TSigner, proposalIndex: number) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.cancelProposal', [proposalIndex]); - } - - cancelProposalCall(proposalIndex: number) { - return this.helper.constructApiCall('api.tx.democracy.cancelProposal', [proposalIndex]); - } - - clearPublicProposals(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.clearPublicProposals', []); - } - - fastTrack(signer: TSigner, proposalHash: string, votingPeriod: number, delayPeriod: number) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); - } - - fastTrackCall(proposalHash: string, votingPeriod: number, delayPeriod: number) { - return this.helper.constructApiCall('api.tx.democracy.fastTrack', [proposalHash, votingPeriod, delayPeriod]); - } - - // referendum. 
CancellationOrigin (TechCom member) - emergencyCancel(signer: TSigner, referendumIndex: number) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.emergencyCancel', [referendumIndex]); - } - - emergencyCancelCall(referendumIndex: number) { - return this.helper.constructApiCall('api.tx.democracy.emergencyCancel', [referendumIndex]); - } - - vote(signer: TSigner, referendumIndex: number, vote: any) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, vote]); - } - - removeVote(signer: TSigner, referendumIndex: number, targetAccount?: string) { - if(targetAccount) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.removeOtherVote', [targetAccount, referendumIndex]); - } else { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.removeVote', [referendumIndex]); - } - } - - unlock(signer: TSigner, targetAccount: string) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.unlock', [targetAccount]); - } - - delegate(signer: TSigner, toAccount: string, conviction: PalletDemocracyConviction, balance: bigint) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.delegate', [toAccount, conviction, balance]); - } - - undelegate(signer: TSigner) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.undelegate', []); - } - - async referendumInfo(referendumIndex: number) { - return (await this.helper.callRpc('api.query.democracy.referendumInfoOf', [referendumIndex])).toJSON(); - } - - async publicProposals() { - return (await this.helper.callRpc('api.query.democracy.publicProps', [])).toJSON(); - } - - async findPublicProposal(proposalIndex: number) { - const proposalInfo = (await this.publicProposals()).find((proposalInfo: any[]) => proposalInfo[0] == proposalIndex); - - return proposalInfo ? proposalInfo[1] : null; - } - - async expectPublicProposal(proposalIndex: number) { - const proposal = await this.findPublicProposal(proposalIndex); - - if(proposal) { - return proposal; - } else { - throw Error(`Proposal #${proposalIndex} is expected to exist`); - } - } - - async getExternalProposal() { - return (await this.helper.callRpc('api.query.democracy.nextExternal', [])); - } - - async expectExternalProposal() { - const proposal = await this.getExternalProposal(); - - if(proposal) { - return proposal; - } else { - throw Error('An external proposal is expected to exist'); - } - } - - /* setMetadata? */ - - /* todo? 
- referendumVote(signer: TSigner, referendumIndex: number, accountVote: DemocracyStandardAccountVote) { - return this.helper.executeExtrinsic(signer, 'api.tx.democracy.vote', [referendumIndex, {Standard: accountVote}], true); - }*/ -} class PreimageGroup extends HelperGroup { async getPreimageInfo(h256: string) { @@ -3572,168 +2933,6 @@ class PreimageGroup extends HelperGroup { } } -class ForeignAssetsGroup extends HelperGroup { - async register(signer: TSigner, ownerAddress: TSubstrateAccount, location: any, metadata: IForeignAssetMetadata) { - await this.helper.executeExtrinsic( - signer, - 'api.tx.foreignAssets.registerForeignAsset', - [ownerAddress, location, metadata], - true, - ); - } - - async update(signer: TSigner, foreignAssetId: number, location: any, metadata: IForeignAssetMetadata) { - await this.helper.executeExtrinsic( - signer, - 'api.tx.foreignAssets.updateForeignAsset', - [foreignAssetId, location, metadata], - true, - ); - } -} - -export class XcmGroup extends HelperGroup { - palletName: string; - - constructor(helper: T, palletName: string) { - super(helper); - - this.palletName = palletName; - } - - async limitedReserveTransferAssets(signer: TSigner, destination: any, beneficiary: any, assets: any, feeAssetItem: number, weightLimit: any) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.limitedReserveTransferAssets`, [destination, beneficiary, assets, feeAssetItem, weightLimit], true); - } - - async setSafeXcmVersion(signer: TSigner, version: number) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.forceDefaultXcmVersion`, [version], true); - } - - async teleportAssets(signer: TSigner, destination: any, beneficiary: any, assets: any, feeAssetItem: number) { - await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.teleportAssets`, [destination, beneficiary, assets, feeAssetItem], true); - } - - async teleportNativeAsset(signer: TSigner, destinationParaId: number, targetAccount: Uint8Array, amount: bigint, xcmVersion = 3) { - const destinationContent = { - parents: 0, - interior: { - X1: { - Parachain: destinationParaId, - }, - }, - }; - - const beneficiaryContent = { - parents: 0, - interior: { - X1: { - AccountId32: { - network: 'Any', - id: targetAccount, - }, - }, - }, - }; - - const assetsContent = [ - { - id: { - Concrete: { - parents: 0, - interior: 'Here', - }, - }, - fun: { - Fungible: amount, - }, - }, - ]; - - let destination; - let beneficiary; - let assets; - - if(xcmVersion == 2) { - destination = {V1: destinationContent}; - beneficiary = {V1: beneficiaryContent}; - assets = {V1: assetsContent}; - - } else if(xcmVersion == 3) { - destination = {V2: destinationContent}; - beneficiary = {V2: beneficiaryContent}; - assets = {V2: assetsContent}; - - } else { - throw Error('Unknown XCM version: ' + xcmVersion); - } - - const feeAssetItem = 0; - - await this.teleportAssets(signer, destination, beneficiary, assets, feeAssetItem); - } - - async send(signer: IKeyringPair, destination: any, message: any) { - await this.helper.executeExtrinsic( - signer, - `api.tx.${this.palletName}.send`, - [ - destination, - message, - ], - true, - ); - } -} - -export class XTokensGroup extends HelperGroup { - async transfer(signer: TSigner, currencyId: any, amount: bigint, destination: any, destWeight: any) { - await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transfer', [currencyId, amount, destination, destWeight], true); - } - - async transferMultiasset(signer: TSigner, asset: any, destination: any, 
destWeight: any) { - await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transferMultiasset', [asset, destination, destWeight], true); - } - - async transferMulticurrencies(signer: TSigner, currencies: any[], feeItem: number, destLocation: any, destWeight: any) { - await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transferMulticurrencies', [currencies, feeItem, destLocation, destWeight], true); - } -} - - - -export class TokensGroup extends HelperGroup { - async accounts(address: string, currencyId: any) { - const {free} = (await this.helper.callRpc('api.query.tokens.accounts', [address, currencyId])).toJSON() as any; - return BigInt(free); - } -} - -export class AssetsGroup extends HelperGroup { - async create(signer: TSigner, assetId: number, admin: string, minimalBalance: bigint) { - await this.helper.executeExtrinsic(signer, 'api.tx.assets.create', [assetId, admin, minimalBalance], true); - } - - async setMetadata(signer: TSigner, assetId: number, name: string, symbol: string, decimals: number) { - await this.helper.executeExtrinsic(signer, 'api.tx.assets.setMetadata', [assetId, name, symbol, decimals], true); - } - - async mint(signer: TSigner, assetId: number, beneficiary: string, amount: bigint) { - await this.helper.executeExtrinsic(signer, 'api.tx.assets.mint', [assetId, beneficiary, amount], true); - } - - async account(assetId: string | number, address: string) { - const accountAsset = ( - await this.helper.callRpc('api.query.assets.account', [assetId, address]) - ).toJSON()! as any; - - if(accountAsset !== null) { - return BigInt(accountAsset['balance']); - } else { - return null; - } - } -} - class UtilityGroup extends HelperGroup { async batch(signer: TSigner, txs: any[]) { return await this.helper.executeExtrinsic(signer, 'api.tx.utility.batch', [txs]); @@ -3748,8 +2947,6 @@ class UtilityGroup extends HelperGroup { } } - - export type ChainHelperBaseConstructor = new (...args: any[]) => ChainHelperBase; export type UniqueHelperConstructor = new (...args: any[]) => UniqueHelper; @@ -3760,17 +2957,7 @@ export class UniqueHelper extends ChainHelperBase { rft: RFTGroup; ft: FTGroup; staking: StakingGroup; - scheduler: SchedulerGroup; - collatorSelection: CollatorSelectionGroup; - council: ICollectiveGroup; - technicalCommittee: ICollectiveGroup; - fellowship: IFellowshipGroup; - democracy: DemocracyGroup; preimage: PreimageGroup; - foreignAssets: ForeignAssetsGroup; - xcm: XcmGroup; - xTokens: XTokensGroup; - tokens: TokensGroup; utility: UtilityGroup; constructor(logger?: ILogger, options: { [key: string]: any } = {}) { @@ -3782,90 +2969,11 @@ export class UniqueHelper extends ChainHelperBase { this.rft = new RFTGroup(this); this.ft = new FTGroup(this); this.staking = new StakingGroup(this); - this.scheduler = new SchedulerGroup(this); - this.collatorSelection = new CollatorSelectionGroup(this); - this.council = { - collective: new CollectiveGroup(this, 'council'), - membership: new CollectiveMembershipGroup(this, 'councilMembership'), - }; - this.technicalCommittee = { - collective: new CollectiveGroup(this, 'technicalCommittee'), - membership: new CollectiveMembershipGroup(this, 'technicalCommitteeMembership'), - }; - this.fellowship = { - collective: new RankedCollectiveGroup(this, 'fellowshipCollective'), - referenda: new ReferendaGroup(this, 'fellowshipReferenda'), - }; - this.democracy = new DemocracyGroup(this); this.preimage = new PreimageGroup(this); - this.foreignAssets = new ForeignAssetsGroup(this); - this.xcm = new XcmGroup(this, 'polkadotXcm'); - 
this.xTokens = new XTokensGroup(this); - this.tokens = new TokensGroup(this); this.utility = new UtilityGroup(this); } } -// eslint-disable-next-line @typescript-eslint/naming-convention -function ScheduledUniqueHelper(Base: T) { - return class extends Base { - scheduleFn: 'schedule' | 'scheduleAfter'; - blocksNum: number; - options: ISchedulerOptions; - - constructor(...args: any[]) { - const logger = args[0] as ILogger; - const options = args[1] as { - scheduleFn: 'schedule' | 'scheduleAfter', - blocksNum: number, - options: ISchedulerOptions - }; - - super(logger); - - this.scheduleFn = options.scheduleFn; - this.blocksNum = options.blocksNum; - this.options = options.options; - } - - executeExtrinsic(sender: IKeyringPair, scheduledExtrinsic: string, scheduledParams: any[], expectSuccess?: boolean): Promise { - const scheduledTx = this.constructApiCall(scheduledExtrinsic, scheduledParams); - - const mandatorySchedArgs = [ - this.blocksNum, - this.options.periodic ? [this.options.periodic.period, this.options.periodic.repetitions] : null, - this.options.priority ?? null, - scheduledTx, - ]; - - let schedArgs; - let scheduleFn; - - if(this.options.scheduledId) { - schedArgs = [this.options.scheduledId!, ...mandatorySchedArgs]; - - if(this.scheduleFn == 'schedule') { - scheduleFn = 'scheduleNamed'; - } else if(this.scheduleFn == 'scheduleAfter') { - scheduleFn = 'scheduleNamedAfter'; - } - } else { - schedArgs = mandatorySchedArgs; - scheduleFn = this.scheduleFn; - } - - const extrinsic = 'api.tx.scheduler.' + scheduleFn; - - return super.executeExtrinsic( - sender, - extrinsic as any, - schedArgs, - expectSuccess, - ); - } - }; -} - export class UniqueBaseCollection { helper: UniqueHelper; collectionId: number; @@ -3974,22 +3082,6 @@ export class UniqueBaseCollection { async burn(signer: TSigner) { return await this.helper.collection.burn(signer, this.collectionId); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAt(executionBlockNumber, options); - return new UniqueBaseCollection(this.collectionId, scheduledHelper); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); - return new UniqueBaseCollection(this.collectionId, scheduledHelper); - } } export class UniqueNFTCollection extends UniqueBaseCollection { @@ -4083,22 +3175,6 @@ export class UniqueNFTCollection extends UniqueBaseCollection { async unnestToken(signer: TSigner, tokenId: number, fromTokenObj: IToken, toAddressObj: ICrossAccountId) { return await this.helper.nft.unnestToken(signer, {collectionId: this.collectionId, tokenId}, fromTokenObj, toAddressObj); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAt(executionBlockNumber, options); - return new UniqueNFTCollection(this.collectionId, scheduledHelper); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); - return new UniqueNFTCollection(this.collectionId, scheduledHelper); - } } export class UniqueRFTCollection extends UniqueBaseCollection { @@ -4204,22 +3280,6 @@ export class UniqueRFTCollection extends UniqueBaseCollection { async unnestToken(signer: TSigner, tokenId: number, fromTokenObj: IToken, 
toAddressObj: ICrossAccountId) { return await this.helper.rft.unnestToken(signer, {collectionId: this.collectionId, tokenId}, fromTokenObj, toAddressObj); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAt(executionBlockNumber, options); - return new UniqueRFTCollection(this.collectionId, scheduledHelper); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); - return new UniqueRFTCollection(this.collectionId, scheduledHelper); - } } export class UniqueFTCollection extends UniqueBaseCollection { @@ -4266,22 +3326,6 @@ export class UniqueFTCollection extends UniqueBaseCollection { async approveTokens(signer: TSigner, toAddressObj: ICrossAccountId, amount = 1n) { return await this.helper.ft.approveTokens(signer, this.collectionId, toAddressObj, amount); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAt(executionBlockNumber, options); - return new UniqueFTCollection(this.collectionId, scheduledHelper); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledHelper = this.helper.scheduler.scheduleAfter(blocksBeforeExecution, options); - return new UniqueFTCollection(this.collectionId, scheduledHelper); - } } export class UniqueBaseToken { @@ -4322,22 +3366,6 @@ export class UniqueBaseToken { nestingAccount() { return this.collection.helper.util.getTokenAccount(this); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAt(executionBlockNumber, options); - return new UniqueBaseToken(this.tokenId, scheduledCollection); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); - return new UniqueBaseToken(this.tokenId, scheduledCollection); - } } export class UniqueNFToken extends UniqueBaseToken { @@ -4395,22 +3423,6 @@ export class UniqueNFToken extends UniqueBaseToken { async burnFrom(signer: TSigner, fromAddressObj: ICrossAccountId) { return await this.collection.burnTokenFrom(signer, this.tokenId, fromAddressObj); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAt(executionBlockNumber, options); - return new UniqueNFToken(this.tokenId, scheduledCollection); - } - - scheduleAfter( - blocksBeforeExecution: number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); - return new UniqueNFToken(this.tokenId, scheduledCollection); - } } export class UniqueRFToken extends UniqueBaseToken { @@ -4480,20 +3492,4 @@ export class UniqueRFToken extends UniqueBaseToken { async burnFrom(signer: TSigner, fromAddressObj: ICrossAccountId, amount = 1n) { return await this.collection.burnTokenFrom(signer, this.tokenId, fromAddressObj, amount); } - - scheduleAt( - executionBlockNumber: number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAt(executionBlockNumber, options); - return new UniqueRFToken(this.tokenId, scheduledCollection); - } - - scheduleAfter( - blocksBeforeExecution: 
number, - options: ISchedulerOptions = {}, - ) { - const scheduledCollection = this.collection.scheduleAfter(blocksBeforeExecution, options); - return new UniqueRFToken(this.tokenId, scheduledCollection); - } } diff --git a/tests/src/util/playgrounds/unique.xcm.ts b/tests/src/util/playgrounds/unique.xcm.ts index 376f6a86d2..12d736ae6c 100644 --- a/tests/src/util/playgrounds/unique.xcm.ts +++ b/tests/src/util/playgrounds/unique.xcm.ts @@ -1,9 +1,9 @@ - import {ApiPromise, WsProvider} from '@polkadot/api'; -import {AssetsGroup, ChainHelperBase, EthereumBalanceGroup, HelperGroup, SubstrateBalanceGroup, TokensGroup, UniqueHelper, XTokensGroup, XcmGroup} from './unique'; -import {ILogger, TSigner} from './types'; -import {SudoHelper} from './unique.dev'; -import {AcalaAssetMetadata, DemocracyStandardAccountVote, MoonbeamAssetInfo} from './types.xcm'; +import {IKeyringPair} from '@polkadot/types/types'; +import {ChainHelperBase, EthereumBalanceGroup, HelperGroup, SubstrateBalanceGroup, UniqueHelper} from './unique'; +import {ILogger, TSigner, TSubstrateAccount} from './types'; +import {AcalaAssetMetadata, DemocracyStandardAccountVote, IForeignAssetMetadata, MoonbeamAssetInfo} from './types.xcm'; + export class XcmChainHelper extends ChainHelperBase { async connect(wsEndpoint: string, _listeners?: any): Promise { @@ -102,6 +102,169 @@ class PolkadexXcmHelperGroup extends HelperGroup { await this.helper.executeExtrinsic(signer, 'api.tx.xcmHelper.whitelistToken', [assetId], true); } } + +export class ForeignAssetsGroup extends HelperGroup { + async register(signer: TSigner, ownerAddress: TSubstrateAccount, location: any, metadata: IForeignAssetMetadata) { + await this.helper.executeExtrinsic( + signer, + 'api.tx.foreignAssets.registerForeignAsset', + [ownerAddress, location, metadata], + true, + ); + } + + async update(signer: TSigner, foreignAssetId: number, location: any, metadata: IForeignAssetMetadata) { + await this.helper.executeExtrinsic( + signer, + 'api.tx.foreignAssets.updateForeignAsset', + [foreignAssetId, location, metadata], + true, + ); + } +} + +export class XcmGroup extends HelperGroup { + palletName: string; + + constructor(helper: T, palletName: string) { + super(helper); + + this.palletName = palletName; + } + + async limitedReserveTransferAssets(signer: TSigner, destination: any, beneficiary: any, assets: any, feeAssetItem: number, weightLimit: any) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.limitedReserveTransferAssets`, [destination, beneficiary, assets, feeAssetItem, weightLimit], true); + } + + async setSafeXcmVersion(signer: TSigner, version: number) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.forceDefaultXcmVersion`, [version], true); + } + + async teleportAssets(signer: TSigner, destination: any, beneficiary: any, assets: any, feeAssetItem: number) { + await this.helper.executeExtrinsic(signer, `api.tx.${this.palletName}.teleportAssets`, [destination, beneficiary, assets, feeAssetItem], true); + } + + async teleportNativeAsset(signer: TSigner, destinationParaId: number, targetAccount: Uint8Array, amount: bigint, xcmVersion = 3) { + const destinationContent = { + parents: 0, + interior: { + X1: { + Parachain: destinationParaId, + }, + }, + }; + + const beneficiaryContent = { + parents: 0, + interior: { + X1: { + AccountId32: { + network: 'Any', + id: targetAccount, + }, + }, + }, + }; + + const assetsContent = [ + { + id: { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + fun: { + Fungible: amount, + 
}, + }, + ]; + + let destination; + let beneficiary; + let assets; + + if(xcmVersion == 2) { + destination = {V1: destinationContent}; + beneficiary = {V1: beneficiaryContent}; + assets = {V1: assetsContent}; + + } else if(xcmVersion == 3) { + destination = {V2: destinationContent}; + beneficiary = {V2: beneficiaryContent}; + assets = {V2: assetsContent}; + + } else { + throw Error('Unknown XCM version: ' + xcmVersion); + } + + const feeAssetItem = 0; + + await this.teleportAssets(signer, destination, beneficiary, assets, feeAssetItem); + } + + async send(signer: IKeyringPair, destination: any, message: any) { + await this.helper.executeExtrinsic( + signer, + `api.tx.${this.palletName}.send`, + [ + destination, + message, + ], + true, + ); + } +} + +export class XTokensGroup extends HelperGroup { + async transfer(signer: TSigner, currencyId: any, amount: bigint, destination: any, destWeight: any) { + await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transfer', [currencyId, amount, destination, destWeight], true); + } + + async transferMultiasset(signer: TSigner, asset: any, destination: any, destWeight: any) { + await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transferMultiasset', [asset, destination, destWeight], true); + } + + async transferMulticurrencies(signer: TSigner, currencies: any[], feeItem: number, destLocation: any, destWeight: any) { + await this.helper.executeExtrinsic(signer, 'api.tx.xTokens.transferMulticurrencies', [currencies, feeItem, destLocation, destWeight], true); + } +} + + + +export class TokensGroup extends HelperGroup { + async accounts(address: string, currencyId: any) { + const {free} = (await this.helper.callRpc('api.query.tokens.accounts', [address, currencyId])).toJSON() as any; + return BigInt(free); + } +} + +export class AssetsGroup extends HelperGroup { + async create(signer: TSigner, assetId: number, admin: string, minimalBalance: bigint) { + await this.helper.executeExtrinsic(signer, 'api.tx.assets.create', [assetId, admin, minimalBalance], true); + } + + async setMetadata(signer: TSigner, assetId: number, name: string, symbol: string, decimals: number) { + await this.helper.executeExtrinsic(signer, 'api.tx.assets.setMetadata', [assetId, name, symbol, decimals], true); + } + + async mint(signer: TSigner, assetId: number, beneficiary: string, amount: bigint) { + await this.helper.executeExtrinsic(signer, 'api.tx.assets.mint', [assetId, beneficiary, amount], true); + } + + async account(assetId: string | number, address: string) { + const accountAsset = ( + await this.helper.callRpc('api.query.assets.account', [assetId, address]) + ).toJSON()! 
as any; + + if(accountAsset !== null) { + return BigInt(accountAsset['balance']); + } else { + return null; + } + } +} + export class RelayHelper extends XcmChainHelper { balance: SubstrateBalanceGroup; xcm: XcmGroup; From bd22bf4f1cc0470e93ef7d373399d05c7415a5cd Mon Sep 17 00:00:00 2001 From: PraetorP Date: Thu, 14 Sep 2023 13:05:13 +0000 Subject: [PATCH 021/143] fix(test): remove token schedulers from tests --- tests/src/maintenance.seqtest.ts | 23 +++++++++++-------- tests/src/scheduler.seqtest.ts | 29 ++++++++++++------------ tests/src/util/playgrounds/unique.dev.ts | 4 ++-- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/tests/src/maintenance.seqtest.ts b/tests/src/maintenance.seqtest.ts index df563386c3..fb8d795d61 100644 --- a/tests/src/maintenance.seqtest.ts +++ b/tests/src/maintenance.seqtest.ts @@ -192,19 +192,22 @@ describe('Integration Test: Maintenance Functionality', () => { const blocksToWait = 6; // Scheduling works before the maintenance - await nftBeforeMM.scheduleAfter(blocksToWait, {scheduledId: scheduledIdBeforeMM}) - .transfer(bob, {Substrate: superuser.address}); + await helper.scheduler.scheduleAfter(blocksToWait, {scheduledId: scheduledIdBeforeMM}) + .nft.transferToken(bob, collection.collectionId, nftBeforeMM.tokenId, {Substrate: superuser.address}); + await helper.wait.newBlocks(blocksToWait + 1); expect(await nftBeforeMM.getOwner()).to.be.deep.equal({Substrate: superuser.address}); // Schedule a transaction that should occur *during* the maintenance - await nftDuringMM.scheduleAfter(blocksToWait, {scheduledId: scheduledIdDuringMM}) - .transfer(bob, {Substrate: superuser.address}); + await helper.scheduler.scheduleAfter(blocksToWait, {scheduledId: scheduledIdDuringMM}) + .nft.transferToken(bob, collection.collectionId, nftDuringMM.tokenId, {Substrate: superuser.address}); + // Schedule a transaction that should occur *after* the maintenance - await nftDuringMM.scheduleAfter(blocksToWait * 2, {scheduledId: scheduledIdBunkerThroughMM}) - .transfer(bob, {Substrate: superuser.address}); + await helper.scheduler.scheduleAfter(blocksToWait * 2, {scheduledId: scheduledIdBunkerThroughMM}) + .nft.transferToken(bob, collection.collectionId, nftDuringMM.tokenId, {Substrate: superuser.address}); + await helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.enable', []); expect(await maintenanceEnabled(helper.getApi()), 'MM is OFF when it should be ON').to.be.true; @@ -214,16 +217,16 @@ describe('Integration Test: Maintenance Functionality', () => { expect(await nftDuringMM.getOwner()).to.be.deep.equal({Substrate: bob.address}); // Any attempts to schedule a tx during the MM should be rejected - await expect(nftDuringMM.scheduleAfter(blocksToWait, {scheduledId: scheduledIdAttemptDuringMM}) - .transfer(bob, {Substrate: superuser.address})) + await expect(helper.scheduler.scheduleAfter(blocksToWait, {scheduledId: scheduledIdAttemptDuringMM}) + .nft.transferToken(bob, collection.collectionId, nftDuringMM.tokenId, {Substrate: superuser.address})) .to.be.rejectedWith(/Invalid Transaction: Transaction call is not expected/); await helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.disable', []); expect(await maintenanceEnabled(helper.getApi()), 'MM is ON when it should be OFF').to.be.false; // Scheduling works after the maintenance - await nftAfterMM.scheduleAfter(blocksToWait, {scheduledId: scheduledIdAfterMM}) - .transfer(bob, {Substrate: superuser.address}); + await helper.scheduler.scheduleAfter(blocksToWait, {scheduledId: 
scheduledIdAfterMM}) + .nft.transferToken(bob, collection.collectionId, nftAfterMM.tokenId, {Substrate: superuser.address}); await helper.wait.newBlocks(blocksToWait + 1); diff --git a/tests/src/scheduler.seqtest.ts b/tests/src/scheduler.seqtest.ts index 376b0c5c97..3ee9bf15e1 100644 --- a/tests/src/scheduler.seqtest.ts +++ b/tests/src/scheduler.seqtest.ts @@ -47,9 +47,8 @@ describe('Scheduling token and balance transfers', () => { const token = await collection.mintToken(alice); const scheduledId = scheduleKind == 'named' ? helper.arrange.makeScheduledId() : undefined; const blocksBeforeExecution = 4; - - await token.scheduleAfter(blocksBeforeExecution, {scheduledId}) - .transfer(alice, {Substrate: bob.address}); + await helper.scheduler.scheduleAfter(blocksBeforeExecution, {scheduledId}) + .nft.transferToken(alice, collection.collectionId, token.tokenId, {Substrate: bob.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + blocksBeforeExecution + 1; expect(await token.getOwner()).to.be.deep.equal({Substrate: alice.address}); @@ -103,8 +102,8 @@ describe('Scheduling token and balance transfers', () => { expect(await token.getOwner()).to.be.deep.equal({Substrate: alice.address}); - await token.scheduleAfter(waitForBlocks, {scheduledId}) - .transfer(alice, {Substrate: bob.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(alice, collection.collectionId, token.tokenId, {Substrate: bob.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + waitForBlocks + 1; await helper.scheduler.cancelScheduled(alice, scheduledId); @@ -363,8 +362,8 @@ describe('Scheduling token and balance transfers', () => { const scheduledId = helper.arrange.makeScheduledId(); const waitForBlocks = 4; - await token.scheduleAfter(waitForBlocks, {scheduledId}) - .transfer(bob, {Substrate: alice.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(bob, collection.collectionId, token.tokenId, {Substrate: alice.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + waitForBlocks + 1; await helper.getSudo().scheduler.cancelScheduled(superuser, scheduledId); @@ -404,8 +403,8 @@ describe('Scheduling token and balance transfers', () => { const scheduledId = helper.arrange.makeScheduledId(); const waitForBlocks = 6; - await token.scheduleAfter(waitForBlocks, {scheduledId}) - .transfer(bob, {Substrate: alice.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(bob, collection.collectionId, token.tokenId, {Substrate: alice.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + waitForBlocks + 1; const priority = 112; @@ -583,8 +582,8 @@ describe('Negative Test: Scheduling', () => { const scheduledId = helper.arrange.makeScheduledId(); const waitForBlocks = 4; - await token.scheduleAfter(waitForBlocks, {scheduledId}) - .transfer(alice, {Substrate: bob.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(alice, collection.collectionId, token.tokenId, {Substrate: bob.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + waitForBlocks + 1; const scheduled = helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}); @@ -614,8 +613,8 @@ describe('Negative Test: Scheduling', () => { const scheduledId = helper.arrange.makeScheduledId(); const waitForBlocks = 4; - await token.scheduleAfter(waitForBlocks, {scheduledId}) - 
.transfer(alice, {Substrate: bob.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(alice, collection.collectionId, token.tokenId, {Substrate: bob.address}); const executionBlock = await helper.chain.getLatestBlockNumber() + waitForBlocks + 1; await expect(helper.scheduler.cancelScheduled(bob, scheduledId)) @@ -655,8 +654,8 @@ describe('Negative Test: Scheduling', () => { const scheduledId = helper.arrange.makeScheduledId(); const waitForBlocks = 4; - await token.scheduleAfter(waitForBlocks, {scheduledId}) - .transfer(bob, {Substrate: alice.address}); + await helper.scheduler.scheduleAfter(waitForBlocks, {scheduledId}) + .nft.transferToken(bob, collection.collectionId, token.tokenId, {Substrate: alice.address}); const priority = 112; await expect(helper.scheduler.changePriority(alice, scheduledId, priority)) diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 59fc41641c..997c541d74 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -609,7 +609,7 @@ export class DevAstarHelper extends AstarHelper { this.wait = new WaitGroup(this); } - getSudo() { + getSudo() { // eslint-disable-next-line @typescript-eslint/naming-convention const SudoHelperType = SudoHelper(this.helperBase); return this.clone(SudoHelperType) as T; @@ -636,7 +636,7 @@ export class DevAcalaHelper extends AcalaHelper { super(logger, options); this.wait = new WaitGroup(this); } - getSudo() { + getSudo() { // eslint-disable-next-line @typescript-eslint/naming-convention const SudoHelperType = SudoHelper(this.helperBase); return this.clone(SudoHelperType) as T; From 477a07dafafb5b57eeec3f162529e6599ee830c9 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Thu, 14 Sep 2023 13:59:16 +0000 Subject: [PATCH 022/143] fix(types): sudo for `DevShidenHelper` --- tests/src/util/playgrounds/unique.dev.ts | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 997c541d74..ee097ff13f 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -616,16 +616,7 @@ export class DevAstarHelper extends AstarHelper { } } -export class DevShidenHelper extends AstarHelper { - wait: WaitGroup; - - constructor(logger: { log: (msg: any, level: any) => void, level: any }, options: {[key: string]: any} = {}) { - options.helperBase = options.helperBase ?? 
DevShidenHelper; - - super(logger, options); - this.wait = new WaitGroup(this); - } -} +export class DevShidenHelper extends DevAstarHelper { } export class DevAcalaHelper extends AcalaHelper { wait: WaitGroup; From cc055511983a3b0a129f988ccd12db6ae99b7f01 Mon Sep 17 00:00:00 2001 From: Konstantin Astakhov Date: Fri, 15 Sep 2023 10:03:17 +0700 Subject: [PATCH 023/143] update toolchain version --- .devcontainer/Dockerfile | 4 ++-- .docker/additional/Dockerfile-chainql | 6 +++--- .docker/additional/Dockerfile-polkadot | 2 +- .github/workflows/forkless-update-data.yml | 4 ++-- .github/workflows/forkless-update-nodata.yml | 4 ++-- .github/workflows/node-only-update.yml | 4 ++-- .github/workflows/xcm.yml | 6 +++--- README.md | 8 ++++---- tests/README.md | 2 +- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 32f7c15daa..35030d802e 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -13,8 +13,8 @@ RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.37.2/install.sh | b [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" && \ nvm install v16.2.0 && \ npm install -g yarn && \ - rustup toolchain install nightly-2021-11-11 && \ - rustup default nightly-2021-11-11 && \ + rustup toolchain install nightly-2023-05-22 && \ + rustup default nightly-2023-05-22 && \ rustup target add wasm32-unknown-unknown && \ rustup component add rustfmt clippy && \ cargo install cargo-expand cargo-edit cargo-contract diff --git a/.docker/additional/Dockerfile-chainql b/.docker/additional/Dockerfile-chainql index 39033519e8..25d4a6e768 100644 --- a/.docker/additional/Dockerfile-chainql +++ b/.docker/additional/Dockerfile-chainql @@ -15,11 +15,11 @@ RUN apt-get update && \ RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain none RUN rustup toolchain uninstall $(rustup toolchain list) && \ - rustup toolchain install nightly-2022-11-15 && \ - rustup default nightly-2022-11-15 && \ + rustup toolchain install nightly-2023-05-22 && \ + rustup default nightly-2023-05-22 && \ rustup target list --installed && \ rustup show -RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2022-11-15 +RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2023-05-22 RUN mkdir /unique_parachain WORKDIR /unique_parachain diff --git a/.docker/additional/Dockerfile-polkadot b/.docker/additional/Dockerfile-polkadot index 5ec9efb5c5..4d0783a128 100644 --- a/.docker/additional/Dockerfile-polkadot +++ b/.docker/additional/Dockerfile-polkadot @@ -2,7 +2,7 @@ FROM ubuntu:22.04 as rust-builder LABEL maintainer="Unique.Network" -ARG RUST_TOOLCHAIN=nightly-2022-10-09 +ARG RUST_TOOLCHAIN=nightly-2023-05-22 ENV CARGO_HOME="/cargo-home" ENV PATH="/cargo-home/bin:$PATH" diff --git a/.github/workflows/forkless-update-data.yml b/.github/workflows/forkless-update-data.yml index 53e9a1db43..ec56a62c1d 100644 --- a/.github/workflows/forkless-update-data.yml +++ b/.github/workflows/forkless-update-data.yml @@ -40,7 +40,7 @@ jobs: network {quartz}, mainnet_branch {${{ env.QUARTZ_MAINNET_BRANCH }}}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, runtime_features {quartz-runtime}, wasm_name {quartz}, fork_source {${{ env.QUARTZ_REPLICA_FROM }}} network {unique}, mainnet_branch {${{ env.UNIQUE_MAINNET_BRANCH }}}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, runtime_features {unique-runtime}, wasm_name {unique}, fork_source {${{ env.UNIQUE_REPLICA_FROM }}} - forkless-data-build: + forkless-data: needs: prepare-execution-matrix # 
The type of runner that the job will run on @@ -48,7 +48,7 @@ jobs: timeout-minutes: 1380 - name: ${{ matrix.network }}-data-build + name: ${{ matrix.network }}-data continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. diff --git a/.github/workflows/forkless-update-nodata.yml b/.github/workflows/forkless-update-nodata.yml index d9f91074ec..bcb0c60e3a 100644 --- a/.github/workflows/forkless-update-nodata.yml +++ b/.github/workflows/forkless-update-nodata.yml @@ -40,7 +40,7 @@ jobs: network {quartz}, mainnet_branch {${{ env.QUARTZ_MAINNET_BRANCH }}}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, runtime_features {quartz-runtime}, wasm_name {quartz} network {unique}, mainnet_branch {${{ env.UNIQUE_MAINNET_BRANCH }}}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, runtime_features {unique-runtime}, wasm_name {unique} - forkless-nodata-build: + forkless-nodata: needs: prepare-execution-matrix # The type of runner that the job will run on @@ -48,7 +48,7 @@ jobs: timeout-minutes: 1380 - name: ${{ matrix.network }}-nodata-build + name: ${{ matrix.network }}-nodata continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. diff --git a/.github/workflows/node-only-update.yml b/.github/workflows/node-only-update.yml index 027aa61c5f..3d52899c05 100644 --- a/.github/workflows/node-only-update.yml +++ b/.github/workflows/node-only-update.yml @@ -42,7 +42,7 @@ jobs: network {quartz}, mainnet_branch {${{ env.QUARTZ_MAINNET_BRANCH }}}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, runtime_features {quartz-runtime} network {unique}, mainnet_branch {${{ env.UNIQUE_MAINNET_BRANCH }}}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, runtime_features {unique-runtime} - node-only-update-build: + node-only-update: needs: prepare-execution-matrix # The type of runner that the job will run on @@ -50,7 +50,7 @@ jobs: timeout-minutes: 2880 # 48 hours for execution jobs. - name: ${{ matrix.network }}-build + name: ${{ matrix.network }} continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. diff --git a/.github/workflows/xcm.yml b/.github/workflows/xcm.yml index b6b829690c..1dde0e2b37 100644 --- a/.github/workflows/xcm.yml +++ b/.github/workflows/xcm.yml @@ -1,4 +1,4 @@ -name: xcm-testnet-build +name: xcm-testnet # Controls when the action will run. 
on: @@ -44,7 +44,7 @@ jobs: network {quartz}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, acala_version {${{ env.KARURA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONRIVER_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINE_BUILD_BRANCH }}}, astar_version {${{ env.SHIDEN_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmQuartz}, runtime_features {quartz-runtime} network {unique}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmUnique}, runtime_features {unique-runtime} - xcm-build: + xcm: needs: prepare-execution-marix # The type of runner that the job will run on @@ -52,7 +52,7 @@ jobs: timeout-minutes: 600 - name: ${{ matrix.network }}-build + name: ${{ matrix.network }} continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. diff --git a/README.md b/README.md index 78703a74c6..75f2f58e40 100644 --- a/README.md +++ b/README.md @@ -52,17 +52,17 @@ curl https://sh.rustup.rs -sSf | sh 2. Remove all installed toolchains with `rustup toolchain list` and `rustup toolchain uninstall `. -3. Install toolchain nightly-2022-11-15 and make it default: +3. Install toolchain nightly-2023-05-22 and make it default: ```bash -rustup toolchain install nightly-2022-11-15 -rustup default nightly-2022-11-15 +rustup toolchain install nightly-2023-05-22 +rustup default nightly-2023-05-22 ``` 4. Add wasm target for nightly toolchain: ```bash -rustup target add wasm32-unknown-unknown --toolchain nightly-2022-11-15 +rustup target add wasm32-unknown-unknown --toolchain nightly-2023-05-22 ``` 5. Build: diff --git a/tests/README.md b/tests/README.md index 2adf063f4e..4dc71e0146 100644 --- a/tests/README.md +++ b/tests/README.md @@ -8,7 +8,7 @@ git clone https://github.com/paritytech/polkadot.git && cd polkadot git checkout release-v0.9.27 ``` -2. Build with nightly-2022-05-11 +2. Build with nightly-2023-05-22 ```bash cargo build --release ``` From ef615698da27229be8413ef813f6011a547dccb6 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 07:36:11 +0000 Subject: [PATCH 024/143] feature: check formating before push --- .githooks/pre-commit | 42 ++++++++++++++++++++++++++++++++++++++++++ .githooks/pre-push | 42 ++++++++++++++++++++++++++++++++++++++++++ init.sh | 1 + 3 files changed, 85 insertions(+) create mode 100755 .githooks/pre-commit create mode 100755 .githooks/pre-push create mode 100755 init.sh diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 0000000000..906d25e9a2 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Pre-push hook verifying that inappropriate code will not be pushed. + +# Colors for the terminal output +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + +echo "Starting cargo fmt.." +cargo fmt --check +FMT_EXIT="$?" + +# Check that prettier formatting rules are not violated. 
+if [[ "${FMT_EXIT}" = 0 ]]; then + echo -e "${GREEN}cargo fmt succeded${NC}" +else + echo -e "${RED}Commit error!${NC}" + echo "Please format the code via 'cargo fmt', cannot commit unformatted code" + exit 1 +fi + +STAGED_TEST_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep ".ts$\|.js$") + +if [[ "$STAGED_TEST_FILES" = "" ]]; then + echo -e "${GREEN}eslint succeded${NC}" + exit 0 +fi + +echo "Starting eslint.." +./tests/node_modules/.bin/eslint --max-warnings 0 ${STAGED_TEST_FILES[@]} +ESLINT_EXIT="$?" + +if [[ "${ESLINT_EXIT}" = 0 ]]; then + echo -e "${GREEN}eslint succeded${NC}" +else + echo -e "${RED}Commit error!${NC}" + echo "Please format the code via 'yarn fix', cannot Commit unformatted code" + exit 1 +fi + +exit $? \ No newline at end of file diff --git a/.githooks/pre-push b/.githooks/pre-push new file mode 100755 index 0000000000..41fa3e0c99 --- /dev/null +++ b/.githooks/pre-push @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Pre-push hook verifying that inappropriate code will not be pushed. + +# Colors for the terminal output +RED='\033[0;31m' +NC='\033[0m' # No Color + +# Check that prettier formatting rules are not violated. +if ! cargo fmt --check; then + echo -e "${RED}Push error!${NC}" + echo "Please format the code via 'cargo fmt', cannot push unformatted code" + exit 1 +fi + +STAGED_TEST_FILES=$(git diff --cached --name-only --diff-filter=ACM @{upstream}| grep ".ts$\|.js$") + +DIFF_EXIT="$?" + +if [[ "${DIFF_EXIT}" != 0 ]]; then + echo -e "${GREEN}eslint succeded${NC}" + exit 0 +fi + +if [[ "$STAGED_TEST_FILES" = "" ]]; then + echo -e "${GREEN}eslint succeded${NC}" + exit 0 +fi + +echo "Starting eslint.." +./tests/node_modules/.bin/eslint --max-warnings 0 ${STAGED_TEST_FILES[@]} +ESLINT_EXIT="$?" + +if [[ "${ESLINT_EXIT}" = 0 ]]; then + echo -e "${GREEN}eslint succeded${NC}" +else + echo -e "${RED}Commit error!${NC}" + echo "Please format the code via 'yarn fix', cannot Commit unformatted code" + exit 1 +fi + +exit $? diff --git a/init.sh b/init.sh new file mode 100755 index 0000000000..7f2315185c --- /dev/null +++ b/init.sh @@ -0,0 +1 @@ +git config --local core.hooksPath ./.githooks \ No newline at end of file From 1e4737b0e1007f73f40360bb474aef88ada538cd Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 07:36:11 +0000 Subject: [PATCH 025/143] remove pre-push hook --- .githooks/pre-push | 42 ------------------------------------------ 1 file changed, 42 deletions(-) delete mode 100755 .githooks/pre-push diff --git a/.githooks/pre-push b/.githooks/pre-push deleted file mode 100755 index 41fa3e0c99..0000000000 --- a/.githooks/pre-push +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# -# Pre-push hook verifying that inappropriate code will not be pushed. - -# Colors for the terminal output -RED='\033[0;31m' -NC='\033[0m' # No Color - -# Check that prettier formatting rules are not violated. -if ! cargo fmt --check; then - echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'cargo fmt', cannot push unformatted code" - exit 1 -fi - -STAGED_TEST_FILES=$(git diff --cached --name-only --diff-filter=ACM @{upstream}| grep ".ts$\|.js$") - -DIFF_EXIT="$?" - -if [[ "${DIFF_EXIT}" != 0 ]]; then - echo -e "${GREEN}eslint succeded${NC}" - exit 0 -fi - -if [[ "$STAGED_TEST_FILES" = "" ]]; then - echo -e "${GREEN}eslint succeded${NC}" - exit 0 -fi - -echo "Starting eslint.." -./tests/node_modules/.bin/eslint --max-warnings 0 ${STAGED_TEST_FILES[@]} -ESLINT_EXIT="$?" 
- -if [[ "${ESLINT_EXIT}" = 0 ]]; then - echo -e "${GREEN}eslint succeded${NC}" -else - echo -e "${RED}Commit error!${NC}" - echo "Please format the code via 'yarn fix', cannot Commit unformatted code" - exit 1 -fi - -exit $? From cd77dae50bc3d242b99682c2645301c6beba6b99 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 07:38:34 +0000 Subject: [PATCH 026/143] fix: code review requests --- .envrc | 4 ++++ .githooks/pre-commit | 2 +- Makefile | 10 ++++++++++ README.md | 5 +++++ init.sh | 1 - 5 files changed, 20 insertions(+), 2 deletions(-) delete mode 100755 init.sh diff --git a/.envrc b/.envrc index 95f7214f35..ee9923e693 100644 --- a/.envrc +++ b/.envrc @@ -19,6 +19,10 @@ function check_bdk { fi } +if ! diff .githooks/pre-commit .git/hooks/pre-commit >/dev/null; then +echo -e "${RED}Hooks are updated, run make git-hooks${RESET}" +fi + watch_file .baedeker/.bdk-env/discover.env if test -f .baedeker/.bdk-env/discover.env; then check_bdk baedeker diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 906d25e9a2..1ffff6c3ef 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Pre-push hook verifying that inappropriate code will not be pushed. diff --git a/Makefile b/Makefile index 9a72a1d0cf..856c6adc31 100644 --- a/Makefile +++ b/Makefile @@ -167,3 +167,13 @@ check: .PHONY: clippy clippy: cargo clippy --features=quartz-runtime,unique-runtime,try-runtime,runtime-benchmarks --tests + +.PHONY: git-hooks +git-hooks: + cp .githooks/pre-commit .git/hooks/pre-commit + +.PHONY: init +init: + make git-hooks + cd tests + yarn install diff --git a/README.md b/README.md index 75f2f58e40..f85f5bac68 100644 --- a/README.md +++ b/README.md @@ -159,6 +159,11 @@ pushd tests && yarn fix ; popd cd tests && yarn eslint --ext .ts,.js src/ ``` +### Enable checking of code style on commits +```bash +make git-hooks +``` + ## Karura token transfer diff --git a/init.sh b/init.sh deleted file mode 100755 index 7f2315185c..0000000000 --- a/init.sh +++ /dev/null @@ -1 +0,0 @@ -git config --local core.hooksPath ./.githooks \ No newline at end of file From 17ea847df73f7291919d6fe53dacfa9f274b80eb Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 08:13:51 +0000 Subject: [PATCH 027/143] feat: add mint_bulk_cross --- pallets/common/src/eth.rs | 9 +++++ pallets/nonfungible/src/erc.rs | 30 +++++++++++++- pallets/nonfungible/src/stubs/UniqueNFT.raw | Bin 6172 -> 6443 bytes pallets/nonfungible/src/stubs/UniqueNFT.sol | 18 ++++++++- pallets/refungible/src/erc.rs | 37 ++++++++++++++++++ .../refungible/src/stubs/UniqueRefungible.raw | Bin 6172 -> 6443 bytes .../refungible/src/stubs/UniqueRefungible.sol | 18 ++++++++- .../src/eth/stubs/CollectionHelpers.raw | Bin 3715 -> 3715 bytes .../src/eth/stubs/CollectionHelpers.sol | 29 +++++++------- .../common/ethereum/sponsoring/refungible.rs | 1 + tests/src/eth/abi/nonFungible.json | 33 ++++++++++++++++ tests/src/eth/abi/reFungible.json | 33 ++++++++++++++++ tests/src/eth/api/CollectionHelpers.sol | 29 +++++++------- tests/src/eth/api/UniqueNFT.sol | 13 +++++- tests/src/eth/api/UniqueRefungible.sol | 13 +++++- 15 files changed, 228 insertions(+), 35 deletions(-) diff --git a/pallets/common/src/eth.rs b/pallets/common/src/eth.rs index 2b659a2c15..9d9afd5e00 100644 --- a/pallets/common/src/eth.rs +++ b/pallets/common/src/eth.rs @@ -631,3 +631,12 @@ impl From for up_data_structs::AccessMode { } } } + +/// Token minting parameters +#[derive(AbiCoder, Default, Debug)] +pub struct 
MintTokenData { + /// Minted token owner + pub owner: CrossAddress, + /// Minted token properties + pub properties: Vec, +} diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index bb617e5358..d6a1c494db 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -981,13 +981,41 @@ where Ok(true) } + /// @notice Function to mint a token. + /// @param data Array of pairs of token owner and token's properties for minted token + #[weight(>::create_multiple_items(data.len() as u32) + >::set_token_properties(data.len() as u32))] + fn mint_bulk_cross(&mut self, caller: Caller, data: Vec) -> Result { + let caller = T::CrossAccountId::from_eth(caller); + let budget = self + .recorder + .weight_calls_budget(>::find_parent()); + + let mut create_nft_data = Vec::with_capacity(data.len()); + for eth::MintTokenData { owner, properties } in data { + let owner = owner.into_sub_cross_account::()?; + create_nft_data.push(CreateItemData:: { + properties: properties + .into_iter() + .map(|property| property.try_into()) + .collect::>>()? + .try_into() + .map_err(|_| "too many properties")?, + owner, + }); + } + + >::create_multiple_items(self, &caller, create_nft_data, &budget) + .map_err(dispatch_to_evm::)?; + Ok(true) + } + /// @notice Function to mint multiple tokens with the given tokenUris. /// @dev `tokenIds` is array of pairs of token ID and token URI. Token IDs should be consecutive /// numbers and first number should be obtained with `nextTokenId` method /// @param to The new owner /// @param tokens array of pairs of token ID and token URI for minted tokens #[solidity(hide, rename_selector = "mintBulkWithTokenURI")] - #[weight(>::create_multiple_items(tokens.len() as u32) + >::set_token_properties(tokens.len() as u32))] + #[weight(>::create_multiple_items(tokens.len() as u32) + >::set_token_properties(tokens.len() as u32))] fn mint_bulk_with_token_uri( &mut self, caller: Caller, diff --git a/pallets/nonfungible/src/stubs/UniqueNFT.raw b/pallets/nonfungible/src/stubs/UniqueNFT.raw index 423c0fbb54756934627a04d836a5912f7a2349da..969df8bd6cf7779bb9c495b0db6ee37b9501b9f2 100644 GIT binary patch literal 6443 zcma)A36NCP741jQ46{#nPj?SwC~1KrE*LbiHK@@RgK;DN?!Yv$6Z0NR6u}W;lo*Wu zwPyqQ-7^fhB`COSVhl+T)QY7hV2Mi+Bb5M>m|&N?L{5-$aOqR3)Pni)Tj1?g!Z1uYdRR^s>yJ+VaWB}dDEE783W5RP*~IG zr=wtGIm6#fd)Saif0PHqNBB9jqX07^27cAqbFeVGqhRB|UZ|1M`9v!UOXAqQ2+G*F znN81Yj>WPC9vaWXMYnyz_#|hJ+ur8Ph1_n)g&be$@x^4RU`ox{P5GW8%bIfsZ8iwM zmY8cpOEwQ$IwtAPHsrenBtvob?hu;5~ z>xdfyBSH}`F1+fDhzLKAr!+q>L0{1Mn}^%cAhZvoeN zAP+f*UH~~8AwUzlzHaVT(v?ML@nXW%ycS?|#sgJD{*m}CW;Vj&H5ajdAcV7?k1CTpbb?k${7+;|LTmB&IEo1AU>btgs6ds9V zE(ZC?%vFt$eG25(HJ9%P`69?CtNs)Qxu^0NJl~G3b$jkS-_vg6xJ#fyRUWJNPTR5w z3_tY+Zn)`^b8y+q*dh{KYeCxgy#Amk;N0=RA3U=3t2ZwOdDNGsaN#WS=(3IGX&|o# zx%`CZz6i3PNc*oz2sHZwC;ibm8Wm&yiLJ|*zVtRo6Xe37=huVGdZc&RHS>Qk(>rl# z<7+Ft)z0wLO&~w?Po!hObtYuBCzhN9au&#Wt;a6``7MwOa>pMBfyutW`PO$Y!)4E4 zi%4)aft+c7sTo|`i5yybHCEd}&U$vG3Gzf=medCq1$BDYcfJjha^oEB&<{a=2=d$; z9;yZzIH=(#Uv;A*d=<9F4ZU;_PXAsy z2G5JY)x4|zagd}!{2ym^f;2#mOE!`0T3?`Mcz+m|ZNe6j*rFV*9T)pANXp#0Dp7az!1Z#nZe) z>f&-sqT$B&uv<#XbHEC&;QqR=S3cFJM?G~_J~b??d}@3fPgUy!Ei2B322#D?j;2#8 zC7bRd$-SaV+TWbV(R6~2Dtq7es;*0W_C?fD)hyGb)8-!h}}N$v%lEjMGY z`V-f;JYFdfi4e;zV(gU_evX_>EJ@FT$fd+`j~KhMl4TVZgNq_>(UTwDXtHuI)`+J- z-PnpzdyiB;#nC`Uu9c=hvzp^5W8@PlIl>Sh93{Mi_I$6)qXLBzHMHk@Y_xAxd~O{b zqQr#|s2Lo+n1mtJc(d~_Y~RKI){cy*Vxs0LBxZ7nbPc61=7rE_aq-XE%;;I&L_^JS zuA3+dljUUIx#*c;*AO*F^RA|qOEd?l-+V+QbWD-4UnUV(b4kPC_<&cICT-A-%*vs9 z@hfHFV*I!xw!%=-f=pJ{24@{x)v*uLy#r-5{82Z53iqBsOHtb>YNue%AyDuQudNF9 
zN375>QxTJ>olDrkGYC!&iyolRE}8A-@JmnYSdNR@YZQ#B=_~{n+u>M3)Ea78)b^R8 z_KJ>zulNYZCdFZ-5kQ6^F+AQ#(t&+y9k*q58~&5yo2dPB9>bo)43;fg3~_{(fHV!s zkur;nL2o{BJccrsbQp#{FQ|~g5jd1WV2j%A@Lixn-BfjzmX_M<%ocUr5_JHnL7p0L2UvPQMx}Catr&Na3@)Zq`kSF4(=;P(IP3mAened-MUy-V zmvjC(d|AxD40a}u!FL4b9r4fe!;w+psQ2bSQ9n-9*$AN~>L;7h1d6!H(y}(Z5(sf$ z>>EK8Eg1ZdIgB-Adk6o`v!0whs^2E+9tB#|KWE9rF><2*Roh0mkwrEFk6^I0iCJCLA5=2B%@BYhpJ=m~sQ-_D zXGs$c5z)|KiTW%Ktb4fw^LCC`m)&wZl7W?MgSkC64r-XLWCk2IP_Uc`R_5eI!z@R7 z$js5Y>o8X|oKA5&e@J}?(MCp$Xfmfo9|E2?m3ud~ckzaz$BMq9`)TU^f?7%qDfQ%-n(8}`N zGGtg`lpwOPDdRIdgBI}xDRAj=U>8J$28zWRl zY$$oW>IAfMZWk&2n||(}vJGAXvXpVm4hpV(f-M=i(Ir*2GS!$D*``BE4vCZ!P5=75 z$Tp8t?8lBQxv2SU;y_?U^ToYItKSQN4W*UqnAdDl{(??Fh;8_%2^xQwq{+4wgwYpr;#~d1X`*dWVQ0u$6ip@ob75 z^&=&sO+4i_4_Q5`&`CAf(WBn0<|jH-)T@7JT+$paF*`a(oo=K>bUq5rlx+p5%&Sq? zvZJ`NqgW`Ts5k#O4pXTd#V3}uLlqXio6bRn6&b*0bpV{vUwZ2z`m{t65#70_p8x=f zf$wf;(r!0lWORSPcz8{}jfNnB%s?49z{nDc3D_2O5c|#* zIQA)+JW?*PrWwiG^ZLJty-)E-tAM2C*p9R-mjEa>W8LKS z)|huFvO!Ih@k{fLK_|DKqP7qpOlXE`Wx(XtS!!Lh&Q^^~_7f8D@?W$r>qjQRK^Qq@ zFEeu8(m-OeybCi39DRoD%+@W7MjX#sC$M@l>~(pyZr~xI)1;}9X$obpYmjU zT$ET!y>g@KmGONZL;P(Axr^#Rvy2~cc!rW!wsfwK?v{bJiSeFnV$vrefK0D|3v$9jx`kAXB zFY))h#P%?nQDSEvKO%8zN5RUiChS~|>7fh>9|ZdVf{Divob`QdJx!z~peP}K0GlmC^2?t0|MV8>vU1c(&dQ7AEP1z+!^deIw|4`rK?2k8K8@F&pwA>vRAD8&57~1J z&P-!MNz<*QE|P2TiQh^B(j5z5_(k&J-~+#tE|UB5 zqOBHZAH~;lGiRfDRJWUKNK>n!ev)ug7#E5(F0_rw8*}Itf%;!B-m4G6aSUz0pbb{K z;^TW4E?)Ni74s&=QhZBc-LLmfyWW^_NB6v+y)tF$Yi+xm?tgFd%+wVp-d*3fZ{dpT zmM*v;R2vC>rR(a&D^d%t>+D)C7IdXRFTQr!(nZ%U>g-yyFtxZd)hW6b-?-?15$yMnqVv?r52zT=W+%rG}STi{nz%Qa+a0?by|Ukw2`8+D8wFUFLwO! zlG|4D{cSv?^4Yvb<;U~TNRcyLpX3;64g8A={7xfXR*F1hMD;LM%tE!))IdrpJ4$$+DSU&nNpPY9xW&u80D(Lpj+4F`ksQJ&*O}NWjh>J6#BFm z^*m?zn`;Z}QuSvAFnos3+3iJy5!Lb4U|orYAG8-O{MQ3FG8&(3#t=yoyO+Qj3zpgR zf?}I2U*w^QJX~_xCQVFpW;<=i*z+K_2XZ0XcY0zeT`n5ZGj>a%yTtOwya9_1Ag^WS zSn!g~hnKcNzOxO5E}P}0-fpLjf0l-7eI5m%D3H+Gm@^Nti1aLEm2Kmpb9v~{7hFT# zP#6)4dUfG7Gom8=1fH^d|0J!W^G^?c6cg-|g4XfW^sDBBJQn2Jt8PC(CPEK^{4mq` zcaT5v$fT%KKz{tawGPOIAU{2S$10GkiQM}3)}*Mp*R|!UNs8L#*xG-`zB|A*4dg-l z;0};yf&8GZe*?%1e1R7?|MF!RJBTeJA$u6)OUI8~2d)UnJ(tbf1`A=3ufB9)BUUH) zvgAItmSXFVl?~@Xb{EL!R_$2{@-2|h-`2ht&9|yqeUU`0NQF zHQ&NXZ`wyTG2XcO5%5spGfb3AG90!4*8nex-FNd+mutg-e#(})RI<*B{j}SSy zVkuUmAm==@)&O~&Z;adr7gcilngu@sxew$_<>0L#KL9!F#s_Obe&P!>{^A@bCc+D_ zHDPe)0gxwqB)IMc`R(+FYd~%ya@#6&fpp=-YqlK#xxHEqp05H|%dW^HAjyT~zt8Ig zc@xM9>1L9(e1W==ePI~efGr}iMU7cMA^u;G6F@dB-0~Pm>Y#=v=OArC7c63TSxFJz zjvaR7g)JcWfUNC(#s>L$0YCv172#Jzc;^gWBkJ36w(GjviyS-_FZ0*2Ptuoep>rk~o>tSM9EymBO%5yYM zER9|S(KCo;P>f$#&9VrKfuZP&W1dDx+;=dxxOy*kfAv#hE^+#Xr1S8lu2%L4g+gwP zG9>IV!cWkipJ%BmXs`r@6}a%FY|4sUJ2opxj7(8?+m{v*2+i00qKn#g@%P%$12s(4 zZAL4m7RY2!!{S;H`Z_NDb(=XYuNjz|D(9MkV_+~pI zBB7&-jGss%j^dDqf%tif`1^3f|mty=OQQwH`48edO`1pSX`=e24xH=J&sGmt_!E-2%ibW4XXqUovRD5Y! z4a-SUpLapey@Z93Vk;a^iTb7PvZ!Bai2B*>Mc?sJj$W5T&!T_~cfs&PJxvN%y6do( z*DT~u-qJ+<^99`LY^Jk($z-S_yab3TXpW3gVhn!s$rEw+VriS|95@$WTF^Gi{2hVU`zF!^Ng(04xpgOuy65vi-6u)%Vk? 
zTP>Qw!6lUb=IAbI7|~;(YkdJ9(J+syNf|}TIsXQ}Jg&J6ai)*Lw-@^M`uhBGgAd++bOGHoPMc;+|OKR8OparHO#wlgabqz;#8q=;!zs>rKrX9t~`32E0b%n^-kR3{u{W$gS+uI zF7$$ETxwY|UjsZ>)gU}%P>04vW0%Y5HNR{N7#)VuVVO?R_?Ta6I*{Z$e$`m$V)QzB zjN?Ez0i9>?a|XSAHV>8Qmkuif?SjMRoBK2{mk~{MK~92nSU6W@9qA}hI-0)Yvg2G8 zE&LD>P0tp*!{L{(X)dwoBSohhHOv`(VRR+a6X5Oec0|*1*=t=E%bHZKrrU#^zV*Fk zH)EplCe#F=f{Ho@wMNs%fal?LJyw^;qxUeiq$1sAli{Ce`Yw+C*b;b0wCNpEFj6$K zWrgw*be{C!3wKjAM{EQq8qv`N6-q_^gbe9s7)KD@*nEP|I9Uw=J8G^pp|p0MD5?a!Pl}8jRRz29Mb5Jt4f8=m{-~6mt8qDQbsiI|E9_|A92}_URg>}%T(e(VMWVnJtecxD}V*3 z-PE^)-6QS-ON$cN5AY32u60*M%PPx2CYqlHR1!uiK0GkpQZ+kIvoTju;7(aFbU+9S zZgn$&zDa%8@=>81CHekDW~-P6`7T=(Ej#d&7&8S(UhZLe+YFcbn2L;wmc2S41M7o5 zY;XX^oD$7DCR#S4TbvC;yy5JnJ-|sl~jv9i)#&0=hNtNVL_N z7@weUf{aAxxUxq*V}2?qd@3&#S@~27ZJ;6&P&@~czUYcn!m2yxhr7@NmI);1p^8$} zOc!^|>M$ubj_@itNthiXio#a=wbzR&dcd!ghVN-Y<6%o8M)0yI@9Wa(8(Er_}Y#8w|0vz75?Q z40NKu0-Z)1C-xgx%(tqUSsc;WbMC5!K4L4|__H7x!+Jm|3uO(i}g|-O{ocDmmQ+b)ZvxhNnJ0%Y}4_tR!%SzajciZX^ho zFNx~9+9d-drZt|y9PSzP-mRB^pN3$3owrW+4-x+vdG0!n2CM^CVoLKIIyr;EwB965 z+kH(o8&HY{P3pwA3MkM|2}I+5{p0URH}R@18x`?Gu8^I@RU#4QnJyQa<&6g&iZMY> zQI!W25#NEkE#WRr8;xcT^$(hooWOQHLCKgC6&#Di0@u({k454Zches|1c{@CP7Jxb zWA6irhddb{Atb8M)EIWBMq;B+Z^KAH93&Dj)7z$5j0Eu~esz zRqTq=#~kI7SC!Z|fBa&0vi>A7cP-GqEXay2J9P|PXl0QdrhI-i$-Ob9t z6T-mcHH3lpZ16BpqW}McvNUhx&`-@Mh|~;e+g9;5R)h5};1)=mkc{I8K!CWZW8Jb6 z@`bD`bbwXHz! zl0|AaocIrPAHnNkL$v^-wQU9)RNU22A4xbF49*e_&eWlTekEO&X%h6{7v`Ht9K!%C zDg)K2_+r!IWj9>Adj8aSX6KS|ch0g-fBX+0Z8?1L#p`ztjl6c+%(>::create_multiple_items(token_properties.len() as u32) + >::set_token_properties(token_properties.len() as u32))] + fn mint_bulk_cross( + &mut self, + caller: Caller, + token_properties: Vec, + ) -> Result { + let caller = T::CrossAccountId::from_eth(caller); + let budget = self + .recorder + .weight_calls_budget(>::find_parent()); + + let mut create_rft_data = Vec::with_capacity(token_properties.len()); + for eth::MintTokenData { owner, properties } in token_properties { + let owner = owner.into_sub_cross_account::()?; + let users: BoundedBTreeMap<_, _, _> = [(owner, 1)] + .into_iter() + .collect::>() + .try_into() + .map_err(|_| "too many users")?; + create_rft_data.push(CreateItemData:: { + properties: properties + .into_iter() + .map(|property| property.try_into()) + .collect::>>()? + .try_into() + .map_err(|_| "too many properties")?, + users, + }); + } + + >::create_multiple_items(self, &caller, create_rft_data, &budget) + .map_err(dispatch_to_evm::)?; + Ok(true) + } + /// @notice Function to mint multiple tokens with the given tokenUris. /// @dev `tokenIds` is array of pairs of token ID and token URI. 
Token IDs should be consecutive /// numbers and first number should be obtained with `nextTokenId` method diff --git a/pallets/refungible/src/stubs/UniqueRefungible.raw b/pallets/refungible/src/stubs/UniqueRefungible.raw index d12291ca9e95efa862f6c5435b3a99ef09cc41c6..1d811799708702bf4cb012465768ec4d0d315a11 100644 GIT binary patch literal 6443 zcma)A32;>99p8^lLhkHtb{Eo6S<&%Uv~{(p)fL5hQQtmE7Ts3Ak3a>%h!CMxtMA;+ z!SQwz1J+tW9B(_VS_kErT91lVv_q{{8w3{UAPKG|lxdXmm~st}>O$8w4WM0sj3@YZ#Cz9VPo9h@9Ho$KH+_mJVXGTTnQGg$(y59wOiwDL< z%>sb``tEWE;BtWb&wPFfz!d~OvFnMrs9NL7a@oX1^)g)TzIErVz&Z!u9(&L80A~W+ zTQj&8VBRNqX~Vs*LD(K#5s0e@z*mnPxenkF0C!x{u@MrQ0lxm~c?~!{#iu3parG9i zwv^YN0lDu3d~V5(8vuR^@Wq=u-vPlmpWya4?>rawmT~p)^qU(23J(OqW&)Re_5wJP z+>I+Q+lkYc06taqRv6%$m25nf)=j(^a;tK{y61!^<^k)M03WRW`6irR6;UznAGlH;TX-Bm1K_oDemosu)&o6`iyHs9)Kj55eAEVj{{}+$Zy)>w z2+Mqq#0JkZa5cO2@Pz;`2iT<^ei#VIS96D-TO--XPM~#7w@zw?u&uZv5Lc8}(}x$$ z2W>mR8P6;?03HkQvPCa{0Pu8Q7^x2eN{F+RJwE|RDxH1fgVg}ZLFbx3yc7gYK0(9X z-*KWMd=0LSNPgS{@H7tu!QBLIT!Ji+Djak5#`ghk!s&5;u@3@#tC9_#=iw^;(HY$U zmjaxaY$95nz~PsPHc0iFk)2@(+kh(qaYfOnof!KsK=Ml6Rrfy*@FD`Ao&gUcOfrcu=Z;S97r|~LLdp*j$W_@Qqgu_onc&|Jx z(c$xGFqy2ANKlVq1Z$Unoq#f7`>b4 zS7JARMA&`46`p?NqEQEp)w5+{B|X_aO5ZZA@=4wWTDIJRyXr3})AFlIf=Gl=ZWZIN zs_=8c0$|c$Z+~0hZBy>oU@!uj6M{!8QfPH`~OOq~0A~RJ~FMg#=2*!^yYAXySEy!fo z+JM)gRUQ8j$sH_X;E$2~CD?BUT8i36QF|oz9BK-_;k8x4{iqchb}B*=wH?$ucm}~y zap*M^x+Sw66~DB!hU2)Xz1D4G?sgV}i>+`hA!@w|Bx?H&QF~=)-dB7SV}nbmrx8Gg zB{AHlCy8Oddk$$?&4T~r`X*}sn!~bZGo57%CPN&dC0d$-#-KN!+=iu$C2fYK z&kHJOzyeGu1eU1X1m6WJ)E(uDQqod;&D8|0&sD^;3=!lR(->bfbKy?!jL9V#0{2~wc(XOi2G#Ua~%Z#nK1Zs={(O4 z?BSyR+8lnQOodmsQ+~Pr)m(qo7hQV8^(?H{B-tkfGAKV>3rIoJw&* zAuGeTBjhsbJY-PX#YDr6E*CGOWvszz6*#SuJ`xS<{rFLpkwSQfv5=ZbU{d2rj{#5N zW7mh}! zK$skZ;@!A9z`1KhpH=j5^ggB*RQRxDGW-*b6Hw2Uehuob@l{XvbV(t<1nDF-*y~=3 z#sfBt6AgIJqWV|L@`W*^n_<)yQmiTAAvS$e+F+)6ogNWQC%DH*jSTEXB!QaoQ7@FJ zNTsGuii*raOp=T&d%Olhb;O1;!YeuSOU|tV<#bcQ=d#+8nN$XwyCfTG9U z4&(<-3ruEt)--d=fG3(BMgOD5r}#C`bW7FjEDgL&S%Eqw#n90g5OAxPp_7}Gcg?$U zy$H!UZOm5D;-H%E&e%G9gKM=h_wN2N8xu7|uhxiOAR@ubs$rfl>+ zgAhL&2}ta$BfG86#CUAr6=N`Rl}l?JsvLb+kolAeU}R+z31o~4O91f9^1QURy# zoL^T&x>zPUIS-YUyk@#B%B(b#Y~wJmjH1Kr5Rs?H>b2jqDRR({l!!L*ls7zNp}9SQ zRFe}udaQ4XKcD4v z+sw`SDBf1nHWg#^E)t1iEHLz?6+w@U{>D2O(Jiv+i0HPJ11K~~Uwn7NkamjHPNO>m z#JwvAEX)qs^K?{-JuOeZN&bI<$(&(Y&maZe*9y{yHjpZ5wU6bga&!YhBA7S z%_zPc|M|Wl^<&=G8WFpuX!N?>p4#p0r6II2Hy&OU#{B*lbG#~~miHy?d$e3)TilrV zQZ+M8a(B3=Mj8o4V>BjW-uxG_eH4$h8<4bI+mUwV5_*rtSdr9jjd@Iw4emx6za(et z#JTlE_X_dBhNiny5AC~khI=krXSy>_jvgZL@?W$r9zZ6+L0CEE*wfXbG?0)?PhbXt zqtB52{2ba6TJQ7~!MSLCWK1U2xS7N=sNWPO_e#p8)>nK^HXBm%2JI(t46Tg4nx;&v zKmS@km1^S4V>%+@-ghoLjRh(m$bI%2YKd zMN$ISG?|?-eI#g)ak&7DHn5``p{H3sZcvN8|TJ*-L4xu5-s~ z{2dP={(+6$MRj1V#`oJiL&=Lrn5B&!tbw+PHjg$j<%=zX(!O$oKRo#K?6$J6n{JxB zRwYJqeU=<=1$1&P+gngYqTE;X$6G(Wz=Vq{n2HxZOLYcV*)A&s%yC;@;=`Qm?O_a~ z#I_uMMB?Pmys56B-njycMi~@-6YNJ5Ou(ngntry1HqsKhD0Th>UMrN+9c#VLFURBm zGh1jN%dIFmBPWtGq-(bMogjn|%F&Lod@!%F==WX;k!GxSL%MKhC{ zNUp>uelv-dZkzbRFOm-hANXyRyWjWikvw>tGKKcV#2tbK8s!vX#Vx#y?PHE$FL#fm7z*ke16Z|1&e>Q zboP{3s=c}GsguTUJ$kwN+SK!>hn&yGTQU8^j{bjczxwhm5B$!VyY%`+UFU^rBcX5i q%wMoH)pdP$&l1tqlLEZpy2Xp;T{o}0XWrb@g6>qe=vlCA-v0m`h;{S; literal 6172 zcma)A3viUx72fM6A-j3)TQF1>YS9r#+Ug<-(nSZwN6WuElBF){y#y>2hynq$R{B3T z2?6`>CIo!72<;%Yc3MRXI>kCZ#`j~;YPCGt3YOM7qgb(Oq~E#!BO3>`z%J*%_ndRj z^E-Ecet>85d^}e*EpN{YocJN%d@g680^@8Q-+yTKQ z&Ae$DKhVkpc|M)j7ieqglPe*I?sTT#J2D5&iZPrPyt$df?sUw-qHs0iE(^4)aD z-$DM^B@?1f0r~#ZZ26AP;*2l_&Y#@VA1k?X`D23{tox)Jy~U zt5qLA334RJo$F`qgY54>?y7k+2=b9?HF&-STN_@#eY$Ji=8>1egqq8-y5)==3&60| 
z6IglUrBiX(3)mtOT&qBucfESID`4HW?N2UQesKRKAV2VADP7QY6$F-__SmT)RU*x| zQs4@DToXIzem^Qscne#~eTz>Axf9l`NiLB0cqjz4aH z4-AVvjid(8=V5D3eC%S7GeNfJ$DRxU3efz4$2Uqfvi;Te3(PZ{aoGLXA`)9$h#Xk5 z5V8@FGag;7gFG4JtR>IA4e|`{Fftzk`>-`tIs7A#WYV;i+iO8mrA{}lx*P&0cmfSS zpKM1(a4xpKo_et+BC$nWDT(BgPc$tQA4Lg41spj}Q$ zJzxcwe|~5}^;3BCu%}}6Q{z@V)vWRDs?vrtr9r;d#>3T;O@KR5^O&gFP0lsP@ifft z=2_L;&94ym8rKWYJ_^xLfQH-Ie5_i&9)1tWx1_6|{Ky5%QmxWPSr6t^PH67^FFEqE5m$z#!l z5ZWcT?L5BJjEd!isLwgTA0oXZ)xSk<{JZo6wRcM;)Rp??vgc5DVexB~L z`&gz=W~KUmT6L>MG1$0-(%&rIC3QV=B6O`U;3Gn_DVr2ggq-uQ;LG8f%iw3~IDC7d zZ?C7%OGnm8N9d?$_%RV$j1(#&9M+`^lyRM9WN)}F5a6y@_)J%56a-Ve&D(- z!np!I@=S%-wo-jL@l~C8g*zO1-H9x`SEZQcEaNh3bFOz$Xv4R_r_<2W0H5n36;$ut}6XCykXO`jQQ4xOM5aG#C7mXmKlA<{Fw-LeokaBhk?9<&V0I48l(p6Qzj)CNu7I74S5Eqo82V z;ej&!d|_oknBPI(anJh4rA5P=zN5a6CYq(J!N-#H@neS_%~CY+>q0c*dNMpOjVBX} zHdwT~k(jLB8$_uwEe_tv?v7~8$^z;#S;n9oH0J#szJ9II&6sGo41oqxAWgPT`EFe0 z^O);;%r2Kl>tXp)9x*H%4F5#KXXxkZxcYt9_#i15Eb5uELUHlUmKN-BHbvup3(kp# zhuVtN|4K#vE(~Z!5PgLbYy8w@Y^sqy7#SYYBBCkoETc5C@E4g(il$RN)`^l8rPOpj zWkpsYCP`MdJ$~)MbmWFA!fiR=CFf>|YPxBur)8BXE2#oCNH^3Fc)*t|!f2DJN`)H7 z&1};zU5n6_qUnLJ%52js4*T&lOCf5K?T^HY=GvZ;(d#C_gwamun_hSN)}%vC==IDxNL9u8bQ`s#_G4p4W4_&NLfytBfwRG4#;E%#!Z}M9V)5 zqU8b%q5ekm^?fI#j_yki>^o|tU7ak za~I0RG=SthP*IAi;Xss8?I!ug;a&wzhuI+_cMZ2|uj^Cfke4YD`GBY0;UOE%!2~p^ z%2i8Fo~YaZB0Ac^`#wcPPsckWr%Nm7Qi~DraQrtk#giiH{$>-=IbC|UgL?!#8_NA? zXvZ?Q9;2&W%c7cFkJS8M0Axdj6a9@N=AEh<8FX{>aWDJ`Ee({Ih`#3Oe+y`#XXM1w zGV%h81XRlCLX)vB0)v?QI)LKh+^ynM3zkNjV{!lt;&O-ilhqvj2av@kx$0xn9Q2nU zOaimK4bh8qBS5%($pN6{yQG7})afu`qF`_%%WL^}XkNzL-^=p;A!2uu<&NPfz&gMx zrZmr@lGB)B>kZPh+tXyRex<0>q>g{BfCT-N@HXbiC1lzh=?6=gscp%67e8U zcR467Z#<|_OcXMTvfQtT*b}(h5=7HrQfWL6E@ z5t4EI0Pqnvd6JV>LcV}`g~pk#jV>#yky1r+4c=)RDS(@0;FY#W-tNEAw({J0p=}Ko zq1zw?W*XklCU+L-U9w2N03-ej-N*2HSkIe)(b`s>4JgiPpqC`O>*j;^{9xW9_$8gzrY6n~H!{s~uV+M=H3uJQb^B;VA z79U12{zCcknRs{h%!AtLss)*O$b39*OHkOe;^}tP8gq(3pk`;BMauxP1;|DQ)Q?>p z$xUS+&$(TY`#nilEvF3G6E5U#R4x0C+vl$9B-6}4n90}#|0;tyue%7+rW*cYM_u11 zRYW1TY?naV2a-WSS{0vHhc`(_Mam0^<-1))kg84} zUsSE8Ps-F~OxHS@oAD$0{*BV@B;Yj2tYN%vZ*PzukX4LEhDWi4+#gSALs{v?cv{=M zSFSPqqQ&*ehZ22N%X3Xzj>2frCoXBbE(rr4`Q$*r|3|ZYj%${`OoV`ZZKY=U$4=%J z(_B|b5?G;G8NjH}%-W3%P)1RnkM>n9it>~=j8UFPq87?K(Lj01uB6f|C9{?vpsC8K z8V%me_2BB+lvg>z6$NEbUU6YRJ6hKSrPZ7vAdln23^6^E)d;6a2#SGz94Dp`n1*4r zDZwwu$N8yOJfGM8oYh8mm2Y_I5Y>JkKdcT6XFPiY#UoJu6$oy$s-=UlwJ=auM?`A5 zsi01@JWwuRt0rT#D+JV0PH78D^*})RSPNABc9N2;;^Z8*H7A^#I!4vbY_c9 zj(S>br=)Iga7m*V)b~UL$6Eae^?BTBmkmNYY!7;bl8~a)P0F_46yV4wk>&Ylw0fro z>fb(OkD*&8i-$g%r~ZVZD*?I}oy38}4{IVZMJr*I?sWXL`<7g}%ib*rmYZT%+-c^v zc9`YZebA0_yV2A<-dfF1;;!j7NV(w=bICmo-vf9}OTslrPD?%`fR98EgxfJs*$H+So zgIUD~}_(za9%tN;uP^ z)56P>cpyzuW^`|Rki&O#7^D9IDTiXurF75L7L9wA;;Or_(JFc`>}MO7hj97k&@PJ0-O)8NwSKyI z_9Q-R$9OPxZGW^kdvdpQ)v_V;D`d{!%W{wlEqj+U0OX-E%Q&kx1+opuR=y<&DePX=)O3as7%pI0LF5?ffr!AW@d6*Rl7MW!{Hs+I!AYTy~U1SFNI>;+N z$Y-$B(vE0U-E^xmEcQ{g9k?3?K)GCYI(LgbQzoK=>dh0%0giN6ZBQT%r&quK^@V|vfTzTYbK?%=@a~dG8TQM z+%~BECG^Gm)pd#fSh_kcw*E)D`d}Sa_D#BTanJtYdG;edFgHS3aazKX`Eza{JoK3TAX8rrK(mi^jGyce|fX=-gK7F8>P zw#5ZrVb=uhuKFCh@C8215!0JAIY-zaA*d^s?HwZ~?Y2wZ*;RDy2~NFg$EIn|rKVlf zHdV9}VyoLU&`!^KARo4D-{kJI5&=#0854c0T@t52`xLT2#C@wRdqN%dEoi@BrNyI9 zOjf#KJoFxM+$R+_=9217ppS?McD248^x8cAIGOs@B<`ASl@zNuw`=z4INUh)1!12t8i6rTF^VEfh?fJ> z_G+BoNG=D42~sZ(rg5-_O@+lkGq{l+A2H*II10w|brc!DMj5_^Ov+dXvyfuiYZzoe zjKeXNnCM&z`fv?|6ea~@iM6qc@+p+=;BCOT!WJ6Mf$Klm;XhkJ+j!4jBM^hk8H_r1dJe@sLtv^_I- he0%QtjJ9;)6RrJc>FAGp?!UY6@Z>uyE0MWw=Rchom*oHe diff --git a/pallets/unique/src/eth/stubs/CollectionHelpers.sol b/pallets/unique/src/eth/stubs/CollectionHelpers.sol index 1aad73de74..c2572292c3 
100644 --- a/pallets/unique/src/eth/stubs/CollectionHelpers.sol +++ b/pallets/unique/src/eth/stubs/CollectionHelpers.sol @@ -25,12 +25,12 @@ contract CollectionHelpersEvents { } /// @title Contract, which allows users to operate with collections -/// @dev the ERC-165 identifier for this interface is 0x4135fff1 +/// @dev the ERC-165 identifier for this interface is 0x94e5af0d contract CollectionHelpers is Dummy, ERC165, CollectionHelpersEvents { /// Create a collection /// @return address Address of the newly created collection - /// @dev EVM selector for this function is: 0xa765ee5b, - /// or in textual repr: createCollection(((address,uint256),string,string,string,uint8,uint8,(string,bytes)[],(string,(uint8,bool)[])[],(address,uint256)[],(bool,bool,address[]),(uint8,uint256)[],uint8)) + /// @dev EVM selector for this function is: 0x72b5bea7, + /// or in textual repr: createCollection((string,string,string,uint8,uint8,(string,bytes)[],(string,(uint8,bool)[])[],(address,uint256)[],(bool,bool,address[]),(uint8,uint256)[],(address,uint256),uint8)) function createCollection(CreateCollectionData memory data) public payable returns (address) { require(false, stub_error); data; @@ -170,8 +170,6 @@ contract CollectionHelpers is Dummy, ERC165, CollectionHelpersEvents { /// Collection properties struct CreateCollectionData { - /// Collection sponsor - CrossAddress pending_sponsor; /// Collection name string name; /// Collection description @@ -192,11 +190,12 @@ struct CreateCollectionData { CollectionNestingAndPermission nesting_settings; /// Collection limits CollectionLimitValue[] limits; + /// Collection sponsor + CrossAddress pending_sponsor; /// Extra collection flags CollectionFlags flags; } -/// Cross account struct type CollectionFlags is uint8; library CollectionFlagsLib { @@ -207,13 +206,19 @@ library CollectionFlagsLib { /// External collections can't be managed using `unique` api CollectionFlags constant externalField = CollectionFlags.wrap(1); - /// Reserved bits + /// Reserved flags function reservedField(uint8 value) public pure returns (CollectionFlags) { require(value < 1 << 5, "out of bound value"); return CollectionFlags.wrap(value << 1); } } +/// Cross account struct +struct CrossAddress { + address eth; + uint256 sub; +} + /// [`CollectionLimits`](up_data_structs::CollectionLimits) field representation for EVM. struct CollectionLimitValue { CollectionLimitField field; @@ -252,12 +257,6 @@ struct CollectionNestingAndPermission { address[] restricted; } -/// Cross account struct -struct CrossAddress { - address eth; - uint256 sub; -} - /// Ethereum representation of Token Property Permissions. struct TokenPropertyPermission { /// Token property key. @@ -292,10 +291,10 @@ struct Property { /// Type of tokens in collection enum CollectionMode { - /// Fungible - Fungible, /// Nonfungible Nonfungible, + /// Fungible + Fungible, /// Refungible Refungible } diff --git a/runtime/common/ethereum/sponsoring/refungible.rs b/runtime/common/ethereum/sponsoring/refungible.rs index fd93fbcae3..1597f5c9d1 100644 --- a/runtime/common/ethereum/sponsoring/refungible.rs +++ b/runtime/common/ethereum/sponsoring/refungible.rs @@ -242,6 +242,7 @@ mod erc721 { BurnFrom { .. } | BurnFromCross { .. } | MintBulk { .. } + | MintBulkCross { .. } | MintBulkWithTokenUri { .. } => None, MintCross { .. 
} => withdraw_create_item::( diff --git a/tests/src/eth/abi/nonFungible.json b/tests/src/eth/abi/nonFungible.json index 3a520c114e..191e15c19d 100644 --- a/tests/src/eth/abi/nonFungible.json +++ b/tests/src/eth/abi/nonFungible.json @@ -465,6 +465,39 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "components": [ + { + "components": [ + { "internalType": "address", "name": "eth", "type": "address" }, + { "internalType": "uint256", "name": "sub", "type": "uint256" } + ], + "internalType": "struct CrossAddress", + "name": "owner", + "type": "tuple" + }, + { + "components": [ + { "internalType": "string", "name": "key", "type": "string" }, + { "internalType": "bytes", "name": "value", "type": "bytes" } + ], + "internalType": "struct Property[]", + "name": "properties", + "type": "tuple[]" + } + ], + "internalType": "struct MintTokenData[]", + "name": "data", + "type": "tuple[]" + } + ], + "name": "mintBulkCross", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { diff --git a/tests/src/eth/abi/reFungible.json b/tests/src/eth/abi/reFungible.json index b3e0b84d34..30a32d0266 100644 --- a/tests/src/eth/abi/reFungible.json +++ b/tests/src/eth/abi/reFungible.json @@ -447,6 +447,39 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "components": [ + { + "components": [ + { "internalType": "address", "name": "eth", "type": "address" }, + { "internalType": "uint256", "name": "sub", "type": "uint256" } + ], + "internalType": "struct CrossAddress", + "name": "owner", + "type": "tuple" + }, + { + "components": [ + { "internalType": "string", "name": "key", "type": "string" }, + { "internalType": "bytes", "name": "value", "type": "bytes" } + ], + "internalType": "struct Property[]", + "name": "properties", + "type": "tuple[]" + } + ], + "internalType": "struct MintTokenData[]", + "name": "tokenProperties", + "type": "tuple[]" + } + ], + "name": "mintBulkCross", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { diff --git a/tests/src/eth/api/CollectionHelpers.sol b/tests/src/eth/api/CollectionHelpers.sol index 8525bca45a..7af99d9e38 100644 --- a/tests/src/eth/api/CollectionHelpers.sol +++ b/tests/src/eth/api/CollectionHelpers.sol @@ -20,12 +20,12 @@ interface CollectionHelpersEvents { } /// @title Contract, which allows users to operate with collections -/// @dev the ERC-165 identifier for this interface is 0x4135fff1 +/// @dev the ERC-165 identifier for this interface is 0x94e5af0d interface CollectionHelpers is Dummy, ERC165, CollectionHelpersEvents { /// Create a collection /// @return address Address of the newly created collection - /// @dev EVM selector for this function is: 0xa765ee5b, - /// or in textual repr: createCollection(((address,uint256),string,string,string,uint8,uint8,(string,bytes)[],(string,(uint8,bool)[])[],(address,uint256)[],(bool,bool,address[]),(uint8,uint256)[],uint8)) + /// @dev EVM selector for this function is: 0x72b5bea7, + /// or in textual repr: createCollection((string,string,string,uint8,uint8,(string,bytes)[],(string,(uint8,bool)[])[],(address,uint256)[],(bool,bool,address[]),(uint8,uint256)[],(address,uint256),uint8)) function createCollection(CreateCollectionData memory data) external payable returns (address); /// Create an NFT collection @@ -103,8 +103,6 @@ interface CollectionHelpers is Dummy, ERC165, 
CollectionHelpersEvents { /// Collection properties struct CreateCollectionData { - /// Collection sponsor - CrossAddress pending_sponsor; /// Collection name string name; /// Collection description @@ -125,11 +123,12 @@ struct CreateCollectionData { CollectionNestingAndPermission nesting_settings; /// Collection limits CollectionLimitValue[] limits; + /// Collection sponsor + CrossAddress pending_sponsor; /// Extra collection flags CollectionFlags flags; } -/// Cross account struct type CollectionFlags is uint8; library CollectionFlagsLib { @@ -140,13 +139,19 @@ library CollectionFlagsLib { /// External collections can't be managed using `unique` api CollectionFlags constant externalField = CollectionFlags.wrap(1); - /// Reserved bits + /// Reserved flags function reservedField(uint8 value) public pure returns (CollectionFlags) { require(value < 1 << 5, "out of bound value"); return CollectionFlags.wrap(value << 1); } } +/// Cross account struct +struct CrossAddress { + address eth; + uint256 sub; +} + /// [`CollectionLimits`](up_data_structs::CollectionLimits) field representation for EVM. struct CollectionLimitValue { CollectionLimitField field; @@ -185,12 +190,6 @@ struct CollectionNestingAndPermission { address[] restricted; } -/// Cross account struct -struct CrossAddress { - address eth; - uint256 sub; -} - /// Ethereum representation of Token Property Permissions. struct TokenPropertyPermission { /// Token property key. @@ -225,10 +224,10 @@ struct Property { /// Type of tokens in collection enum CollectionMode { - /// Fungible - Fungible, /// Nonfungible Nonfungible, + /// Fungible + Fungible, /// Refungible Refungible } diff --git a/tests/src/eth/api/UniqueNFT.sol b/tests/src/eth/api/UniqueNFT.sol index b03e39948a..a14de3709c 100644 --- a/tests/src/eth/api/UniqueNFT.sol +++ b/tests/src/eth/api/UniqueNFT.sol @@ -551,7 +551,7 @@ interface ERC721UniqueMintable is Dummy, ERC165 { } /// @title Unique extensions for ERC721. -/// @dev the ERC-165 identifier for this interface is 0x307b061a +/// @dev the ERC-165 identifier for this interface is 0x9b397d16 interface ERC721UniqueExtensions is Dummy, ERC165 { /// @notice A descriptive name for a collection of NFTs in this contract /// @dev EVM selector for this function is: 0x06fdde03, @@ -674,6 +674,12 @@ interface ERC721UniqueExtensions is Dummy, ERC165 { // /// or in textual repr: mintBulk(address,uint256[]) // function mintBulk(address to, uint256[] memory tokenIds) external returns (bool); + /// @notice Function to mint a token. + /// @param data Array of pairs of token owner and token's properties for minted token + /// @dev EVM selector for this function is: 0xab427b0c, + /// or in textual repr: mintBulkCross(((address,uint256),(string,bytes)[])[]) + function mintBulkCross(MintTokenData[] memory data) external returns (bool); + // /// @notice Function to mint multiple tokens with the given tokenUris. // /// @dev `tokenIds` is array of pairs of token ID and token URI. 
Token IDs should be consecutive // /// numbers and first number should be obtained with `nextTokenId` method @@ -705,6 +711,11 @@ struct TokenUri { string uri; } +struct MintTokenData { + CrossAddress owner; + Property[] properties; +} + /// @title ERC-721 Non-Fungible Token Standard, optional enumeration extension /// @dev See https://eips.ethereum.org/EIPS/eip-721 /// @dev the ERC-165 identifier for this interface is 0x780e9d63 diff --git a/tests/src/eth/api/UniqueRefungible.sol b/tests/src/eth/api/UniqueRefungible.sol index 61c0138998..11d6806df0 100644 --- a/tests/src/eth/api/UniqueRefungible.sol +++ b/tests/src/eth/api/UniqueRefungible.sol @@ -551,7 +551,7 @@ interface ERC721UniqueMintable is Dummy, ERC165 { } /// @title Unique extensions for ERC721. -/// @dev the ERC-165 identifier for this interface is 0x95c0f66c +/// @dev the ERC-165 identifier for this interface is 0x3e828d60 interface ERC721UniqueExtensions is Dummy, ERC165 { /// @notice A descriptive name for a collection of NFTs in this contract /// @dev EVM selector for this function is: 0x06fdde03, @@ -668,6 +668,12 @@ interface ERC721UniqueExtensions is Dummy, ERC165 { // /// or in textual repr: mintBulk(address,uint256[]) // function mintBulk(address to, uint256[] memory tokenIds) external returns (bool); + /// @notice Function to mint a token. + /// @param tokenProperties Properties of minted token + /// @dev EVM selector for this function is: 0xab427b0c, + /// or in textual repr: mintBulkCross(((address,uint256),(string,bytes)[])[]) + function mintBulkCross(MintTokenData[] memory tokenProperties) external returns (bool); + // /// @notice Function to mint multiple tokens with the given tokenUris. // /// @dev `tokenIds` is array of pairs of token ID and token URI. Token IDs should be consecutive // /// numbers and first number should be obtained with `nextTokenId` method @@ -706,6 +712,11 @@ struct TokenUri { string uri; } +struct MintTokenData { + CrossAddress owner; + Property[] properties; +} + /// @title ERC-721 Non-Fungible Token Standard, optional enumeration extension /// @dev See https://eips.ethereum.org/EIPS/eip-721 /// @dev the ERC-165 identifier for this interface is 0x780e9d63 From a3eb5c564fa0b1670e8bba334f14646ae31044c7 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 08:13:51 +0000 Subject: [PATCH 028/143] test: add tests --- tests/src/eth/nonFungible.test.ts | 91 +++++++++++++++++++++++++++++++ tests/src/eth/reFungible.test.ts | 91 +++++++++++++++++++++++++++++++ 2 files changed, 182 insertions(+) diff --git a/tests/src/eth/nonFungible.test.ts b/tests/src/eth/nonFungible.test.ts index 995213b0b3..8593ee4461 100644 --- a/tests/src/eth/nonFungible.test.ts +++ b/tests/src/eth/nonFungible.test.ts @@ -18,6 +18,7 @@ import {itEth, usingEthPlaygrounds, expect, EthUniqueHelper} from './util'; import {IKeyringPair} from '@polkadot/types/types'; import {Contract} from 'web3-eth-contract'; import {ITokenPropertyPermission} from '../util/playgrounds/types'; +import { CREATE_COLLECTION_DATA_DEFAULTS, CollectionMode, CreateCollectionData, TokenPermissionField } from './util/playgrounds/types'; describe('Check ERC721 token URI for NFT', () => { let donor: IKeyringPair; @@ -197,6 +198,96 @@ describe('NFT: Plain calls', () => { } }); + itEth('Can perform mintBulkCross()', async ({helper}) => { + const caller = await helper.eth.createAccountWithBalance(donor); + const callerCross = helper.ethCrossAccount.fromAddress(caller); + const receiver = helper.eth.createAccount(); + const receiverCross = 
helper.ethCrossAccount.fromAddress(receiver); + + const permissions = [ + {code: TokenPermissionField.Mutable, value: true}, + {code: TokenPermissionField.TokenOwner, value: true}, + {code: TokenPermissionField.CollectionAdmin, value: true}, + ]; + const {collectionAddress} = await helper.eth.createCollection( + caller, + { + ...CREATE_COLLECTION_DATA_DEFAULTS, + name: 'A', + description: 'B', + tokenPrefix: 'C', + collectionMode: 'rft', + adminList: [callerCross], + tokenPropertyPermissions: [ + {key: 'key_0_0', permissions}, + {key: 'key_1_0', permissions}, + {key: 'key_1_1', permissions}, + {key: 'key_2_0', permissions}, + {key: 'key_2_1', permissions}, + {key: 'key_2_2', permissions}, + ], + }, + ).send(); + + const contract = await helper.ethNativeContract.collection(collectionAddress, 'nft', caller); + { + const nextTokenId = await contract.methods.nextTokenId().call(); + expect(nextTokenId).to.be.equal('1'); + const result = await contract.methods.mintBulkCross([ + { + owner: receiverCross, + properties: [ + {key: 'key_0_0', value: Buffer.from('value_0_0')}, + ], + }, + { + owner: receiverCross, + properties: [ + {key: 'key_1_0', value: Buffer.from('value_1_0')}, + {key: 'key_1_1', value: Buffer.from('value_1_1')}, + ], + }, + { + owner: receiverCross, + properties: [ + {key: 'key_2_0', value: Buffer.from('value_2_0')}, + {key: 'key_2_1', value: Buffer.from('value_2_1')}, + {key: 'key_2_2', value: Buffer.from('value_2_2')}, + ], + }, + ]).send({from: caller}); + const events = result.events.Transfer.sort((a: any, b: any) => +a.returnValues.tokenId - b.returnValues.tokenId); + const bulkSize = 3; + for(let i = 0; i < bulkSize; i++) { + const event = events[i]; + expect(event.address).to.equal(collectionAddress); + expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); + expect(event.returnValues.to).to.equal(receiver); + expect(event.returnValues.tokenId).to.equal(`${+nextTokenId + i}`); + } + + const properties = [ + await contract.methods.properties(+nextTokenId, []).call(), + await contract.methods.properties(+nextTokenId + 1, []).call(), + await contract.methods.properties(+nextTokenId + 2, []).call(), + ]; + expect(properties).to.be.deep.equal([ + [ + ['key_0_0', helper.getWeb3().utils.toHex('value_0_0')], + ], + [ + ['key_1_0', helper.getWeb3().utils.toHex('value_1_0')], + ['key_1_1', helper.getWeb3().utils.toHex('value_1_1')], + ], + [ + ['key_2_0', helper.getWeb3().utils.toHex('value_2_0')], + ['key_2_1', helper.getWeb3().utils.toHex('value_2_1')], + ['key_2_2', helper.getWeb3().utils.toHex('value_2_2')], + ], + ]); + } + }); + itEth('Can perform burn()', async ({helper}) => { const caller = await helper.eth.createAccountWithBalance(donor); diff --git a/tests/src/eth/reFungible.test.ts b/tests/src/eth/reFungible.test.ts index ecf0f07566..c9d138eab2 100644 --- a/tests/src/eth/reFungible.test.ts +++ b/tests/src/eth/reFungible.test.ts @@ -18,6 +18,7 @@ import {Pallets, requirePalletsOrSkip} from '../util'; import {expect, itEth, usingEthPlaygrounds} from './util'; import {IKeyringPair} from '@polkadot/types/types'; import {ITokenPropertyPermission} from '../util/playgrounds/types'; +import { CREATE_COLLECTION_DATA_DEFAULTS, TokenPermissionField } from './util/playgrounds/types'; describe('Refungible: Plain calls', () => { let donor: IKeyringPair; @@ -125,6 +126,96 @@ describe('Refungible: Plain calls', () => { } }); + itEth('Can perform mintBulkCross()', async ({helper}) => { + const caller = await helper.eth.createAccountWithBalance(donor); + 
const callerCross = helper.ethCrossAccount.fromAddress(caller); + const receiver = helper.eth.createAccount(); + const receiverCross = helper.ethCrossAccount.fromAddress(receiver); + + const permissions = [ + {code: TokenPermissionField.Mutable, value: true}, + {code: TokenPermissionField.TokenOwner, value: true}, + {code: TokenPermissionField.CollectionAdmin, value: true}, + ]; + const {collectionAddress} = await helper.eth.createCollection( + caller, + { + ...CREATE_COLLECTION_DATA_DEFAULTS, + name: 'A', + description: 'B', + tokenPrefix: 'C', + collectionMode: 'rft', + adminList: [callerCross], + tokenPropertyPermissions: [ + {key: 'key_0_0', permissions}, + {key: 'key_1_0', permissions}, + {key: 'key_1_1', permissions}, + {key: 'key_2_0', permissions}, + {key: 'key_2_1', permissions}, + {key: 'key_2_2', permissions}, + ], + }, + ).send(); + + const contract = await helper.ethNativeContract.collection(collectionAddress, 'nft', caller); + { + const nextTokenId = await contract.methods.nextTokenId().call(); + expect(nextTokenId).to.be.equal('1'); + const result = await contract.methods.mintBulkCross([ + { + owner: receiverCross, + properties: [ + {key: 'key_0_0', value: Buffer.from('value_0_0')}, + ], + }, + { + owner: receiverCross, + properties: [ + {key: 'key_1_0', value: Buffer.from('value_1_0')}, + {key: 'key_1_1', value: Buffer.from('value_1_1')}, + ], + }, + { + owner: receiverCross, + properties: [ + {key: 'key_2_0', value: Buffer.from('value_2_0')}, + {key: 'key_2_1', value: Buffer.from('value_2_1')}, + {key: 'key_2_2', value: Buffer.from('value_2_2')}, + ], + }, + ]).send({from: caller}); + const events = result.events.Transfer.sort((a: any, b: any) => +a.returnValues.tokenId - b.returnValues.tokenId); + const bulkSize = 3; + for(let i = 0; i < bulkSize; i++) { + const event = events[i]; + expect(event.address).to.equal(collectionAddress); + expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); + expect(event.returnValues.to).to.equal(receiver); + expect(event.returnValues.tokenId).to.equal(`${+nextTokenId + i}`); + } + + const properties = [ + await contract.methods.properties(+nextTokenId, []).call(), + await contract.methods.properties(+nextTokenId + 1, []).call(), + await contract.methods.properties(+nextTokenId + 2, []).call(), + ]; + expect(properties).to.be.deep.equal([ + [ + ['key_0_0', helper.getWeb3().utils.toHex('value_0_0')], + ], + [ + ['key_1_0', helper.getWeb3().utils.toHex('value_1_0')], + ['key_1_1', helper.getWeb3().utils.toHex('value_1_1')], + ], + [ + ['key_2_0', helper.getWeb3().utils.toHex('value_2_0')], + ['key_2_1', helper.getWeb3().utils.toHex('value_2_1')], + ['key_2_2', helper.getWeb3().utils.toHex('value_2_2')], + ], + ]); + } + }); + itEth('Can perform setApprovalForAll()', async ({helper}) => { const owner = await helper.eth.createAccountWithBalance(donor); const operator = helper.eth.createAccount(); From 9425c0a8f17b13beb12568d38fcdb57f11f6cb05 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 08:13:51 +0000 Subject: [PATCH 029/143] fix: warnings --- node/cli/src/command.rs | 2 +- pallets/common/src/eth.rs | 2 +- pallets/scheduler-v2/src/benchmarking.rs | 6 ++++-- runtime/common/tests/mod.rs | 2 +- tests/src/eth/nonFungible.test.ts | 2 +- tests/src/eth/reFungible.test.ts | 2 +- 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index c4be6cbc81..da2cddf72b 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -425,7 
+425,7 @@ pub fn run() -> Result<()> { .map(|cfg| &cfg.registry); let task_manager = sc_service::TaskManager::new(runner.config().tokio_handle.clone(), *registry) - .map_err(|e| format!("Error: {:?}", e))?; + .map_err(|e| format!("Error: {e:?}"))?; let info_provider = Some(timestamp_with_aura_info(12000)); runner.async_run(|config| -> Result<(Pin>>, _)> { diff --git a/pallets/common/src/eth.rs b/pallets/common/src/eth.rs index 9d9afd5e00..ba5662341a 100644 --- a/pallets/common/src/eth.rs +++ b/pallets/common/src/eth.rs @@ -119,7 +119,7 @@ impl CrossAddress { } else if self.sub == Default::default() { Ok(Some(T::CrossAccountId::from_eth(self.eth))) } else { - Err(format!("All fields of cross account is non zeroed {:?}", self).into()) + Err(format!("All fields of cross account is non zeroed {self:?}").into()) } } diff --git a/pallets/scheduler-v2/src/benchmarking.rs b/pallets/scheduler-v2/src/benchmarking.rs index b70c086c65..6ef47aaf7a 100644 --- a/pallets/scheduler-v2/src/benchmarking.rs +++ b/pallets/scheduler-v2/src/benchmarking.rs @@ -136,8 +136,10 @@ fn make_call(maybe_lookup_len: Option) -> ScheduledCall { let bound = EncodedCall::bound() as u32; let mut len = match maybe_lookup_len { Some(len) => { - len.min(>::MaxSize::get() - 2) - .max(bound) - 3 + len.clamp( + bound, + >::MaxSize::get() - 2, + ) - 3 } None => bound.saturating_sub(4), }; diff --git a/runtime/common/tests/mod.rs b/runtime/common/tests/mod.rs index 44eebea8b4..da9cb73104 100644 --- a/runtime/common/tests/mod.rs +++ b/runtime/common/tests/mod.rs @@ -33,7 +33,7 @@ const PARA_ID: u32 = 2095; const PARA_ID: u32 = 2037; fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) + TPublic::Pair::from_string(&format!("//{seed}"), None) .expect("static values are valid; qed") .public() } diff --git a/tests/src/eth/nonFungible.test.ts b/tests/src/eth/nonFungible.test.ts index 8593ee4461..9d6480014b 100644 --- a/tests/src/eth/nonFungible.test.ts +++ b/tests/src/eth/nonFungible.test.ts @@ -18,7 +18,7 @@ import {itEth, usingEthPlaygrounds, expect, EthUniqueHelper} from './util'; import {IKeyringPair} from '@polkadot/types/types'; import {Contract} from 'web3-eth-contract'; import {ITokenPropertyPermission} from '../util/playgrounds/types'; -import { CREATE_COLLECTION_DATA_DEFAULTS, CollectionMode, CreateCollectionData, TokenPermissionField } from './util/playgrounds/types'; +import {CREATE_COLLECTION_DATA_DEFAULTS, CollectionMode, CreateCollectionData, TokenPermissionField} from './util/playgrounds/types'; describe('Check ERC721 token URI for NFT', () => { let donor: IKeyringPair; diff --git a/tests/src/eth/reFungible.test.ts b/tests/src/eth/reFungible.test.ts index c9d138eab2..abee1fe049 100644 --- a/tests/src/eth/reFungible.test.ts +++ b/tests/src/eth/reFungible.test.ts @@ -18,7 +18,7 @@ import {Pallets, requirePalletsOrSkip} from '../util'; import {expect, itEth, usingEthPlaygrounds} from './util'; import {IKeyringPair} from '@polkadot/types/types'; import {ITokenPropertyPermission} from '../util/playgrounds/types'; -import { CREATE_COLLECTION_DATA_DEFAULTS, TokenPermissionField } from './util/playgrounds/types'; +import {CREATE_COLLECTION_DATA_DEFAULTS, TokenPermissionField} from './util/playgrounds/types'; describe('Refungible: Plain calls', () => { let donor: IKeyringPair; From 0c121c74a84853675fd3caa6ec06a584c64281bf Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 08:13:51 +0000 Subject: [PATCH 030/143] feat: add a way to split RFT between multiple owners 
on mint --- pallets/common/src/eth.rs | 9 ----- pallets/nonfungible/src/erc.rs | 15 +++++++-- pallets/nonfungible/src/stubs/UniqueNFT.raw | Bin 6443 -> 6443 bytes pallets/nonfungible/src/stubs/UniqueNFT.sol | 3 ++ pallets/refungible/src/erc.rs | 30 +++++++++++++---- .../refungible/src/stubs/UniqueRefungible.raw | Bin 6443 -> 6641 bytes .../refungible/src/stubs/UniqueRefungible.sol | 19 ++++++++--- tests/src/eth/abi/reFungible.json | 26 ++++++++++++--- tests/src/eth/api/UniqueNFT.sol | 3 ++ tests/src/eth/api/UniqueRefungible.sol | 19 ++++++++--- tests/src/eth/nonFungible.test.ts | 2 +- tests/src/eth/reFungible.test.ts | 31 +++++++++++++++--- 12 files changed, 120 insertions(+), 37 deletions(-) diff --git a/pallets/common/src/eth.rs b/pallets/common/src/eth.rs index ba5662341a..ba56ac4314 100644 --- a/pallets/common/src/eth.rs +++ b/pallets/common/src/eth.rs @@ -631,12 +631,3 @@ impl From for up_data_structs::AccessMode { } } } - -/// Token minting parameters -#[derive(AbiCoder, Default, Debug)] -pub struct MintTokenData { - /// Minted token owner - pub owner: CrossAddress, - /// Minted token properties - pub properties: Vec, -} diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index d6a1c494db..8d37ec100c 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -26,7 +26,7 @@ use core::{ char::{REPLACEMENT_CHARACTER, decode_utf16}, convert::TryInto, }; -use evm_coder::{abi::AbiType, ToLog, generate_stubgen, solidity_interface, types::*}; +use evm_coder::{abi::AbiType, AbiCoder, ToLog, generate_stubgen, solidity_interface, types::*}; use frame_support::BoundedVec; use up_data_structs::{ TokenId, PropertyPermission, PropertyKeyPermission, Property, CollectionId, PropertyKey, @@ -64,6 +64,15 @@ pub enum ERC721TokenEvent { }, } +/// Token minting parameters +#[derive(AbiCoder, Default, Debug)] +pub struct MintTokenData { + /// Minted token owner + pub owner: eth::CrossAddress, + /// Minted token properties + pub properties: Vec, +} + frontier_contract! { macro_rules! NonfungibleHandle_result {...} impl Contract for NonfungibleHandle {...} @@ -984,14 +993,14 @@ where /// @notice Function to mint a token. /// @param data Array of pairs of token owner and token's properties for minted token #[weight(>::create_multiple_items(data.len() as u32) + >::set_token_properties(data.len() as u32))] - fn mint_bulk_cross(&mut self, caller: Caller, data: Vec) -> Result { + fn mint_bulk_cross(&mut self, caller: Caller, data: Vec) -> Result { let caller = T::CrossAccountId::from_eth(caller); let budget = self .recorder .weight_calls_budget(>::find_parent()); let mut create_nft_data = Vec::with_capacity(data.len()); - for eth::MintTokenData { owner, properties } in data { + for MintTokenData { owner, properties } in data { let owner = owner.into_sub_cross_account::()?; create_nft_data.push(CreateItemData:: { properties: properties diff --git a/pallets/nonfungible/src/stubs/UniqueNFT.raw b/pallets/nonfungible/src/stubs/UniqueNFT.raw index 969df8bd6cf7779bb9c495b0db6ee37b9501b9f2..faa4896938d0528da4b1d4b559475a9a1c3f87c1 100644 GIT binary patch delta 44 zcmV+{0Mq}gGOIGM+87|VSl, + /// Minted token properties + pub properties: Vec, +} + /// @title A contract that allows to set and delete token properties and change token property permissions. 
#[solidity_interface(name = TokenProperties, events(ERC721TokenEvent), enum(derive(PreDispatch)), enum_attr(weight))] impl RefungibleHandle { @@ -1027,7 +1045,7 @@ where fn mint_bulk_cross( &mut self, caller: Caller, - token_properties: Vec, + token_properties: Vec, ) -> Result { let caller = T::CrossAccountId::from_eth(caller); let budget = self @@ -1035,11 +1053,11 @@ where .weight_calls_budget(>::find_parent()); let mut create_rft_data = Vec::with_capacity(token_properties.len()); - for eth::MintTokenData { owner, properties } in token_properties { - let owner = owner.into_sub_cross_account::()?; - let users: BoundedBTreeMap<_, _, _> = [(owner, 1)] + for MintTokenData { owners, properties } in token_properties { + let users: BoundedBTreeMap<_, _, _> = owners .into_iter() - .collect::>() + .map(|data| Ok((data.owner.into_sub_cross_account::()?, data.pieces))) + .collect::>>()? .try_into() .map_err(|_| "too many users")?; create_rft_data.push(CreateItemData:: { diff --git a/pallets/refungible/src/stubs/UniqueRefungible.raw b/pallets/refungible/src/stubs/UniqueRefungible.raw index 1d811799708702bf4cb012465768ec4d0d315a11..c1152e827ef61877d710c201b008cc77938b0312 100644 GIT binary patch delta 1690 zcmaJ>U2GIp6yCeLEnT;PotZl`{UhD}Xe6;A6$iD|bQRPfiA>kB(=d?S3o#KBrO?O& z4RmI9W=m^oclQECNO(b?@UK=8P@gmvsiDTEDF*q`2b4skO$ZOfg6Gce5b(j-?Ckmd z&bjA1cTYUJuu33TNh(bqB9?8Z^IJo;(-XowC|o14_tBHfzA7H>5GblMtZi5JZ26qs zKx>!4FQ%34tSh`?aHe(fb%BPigP)6FmqDCezws2N(WM~H&8&%owS@!h+S`d~#cqRy ztVPq>#<4-a0IqL#&F%#88i;2HGM7NS0phvi@BRR};UMAQg`>|vGevNn*nMjX#5P)% zp&Bo7YIMV8Nd6MU)6t6&5YK`5#mY^+5O8r4N47;@{Fz4PSCc3el#EHE!!+`Tuk8gb za@Y6Sy_&R@M=CV>9gmD*&?@%P$^@HPqIO8f?hE~qYYSyioZyM)%ZWbBU7!3t6RTF% zD%B8Dd4N{_nlVYLn?@E3d8z;kv#Qjv+Rpl`S}WCdaFW16V=e5bsvI~cX{;~SY{5K< z!C(W$V8U6)$o!YhGmCsT1!5%Ph4#C2)xcE&8|B*ords#cQ_ zNjk2N_buzgc(ZV+6zgbNqc-E=%&d9^`O-WWfecr(Dh?H47HoR*+c zb;;^!cA82JBsqj=)g~Yy>7cZgP3v6XqvDuUyqqyDlJrno*L5ogZ+=JyZyPn`!QYfI z_5AoD@bU*>fUH#33NouW>0 z$Rnv73dpitq~-M@|8U%ZGu+HM8^lkP5bog&uP=k*-v8wc#)Qk)l=A>bcQv5XLx^)% zJz=;$clbyN&eB{&QuauMG}aoY5_G6wyB87g=B9Z>nX zv7A3QJI$HXcHH^TV=I`-+XGaH;60`-m-7wltOGPuqH;a(2}=O{!x4sf*< z((lyf6esPPPH!O;hsuH+s$- m{c~++$7KDwseRMY4^F;tZBBS?%lyFd{->JSwzFF`?f(KENk>2c delta 1457 zcmY*YZ)h837|)wa(`2bz?(V%y(@vYVQz^FAsS&0h8fPnvZFoJqT=78PcNP6ml)1q~ zhvxpKf70S5Z)YdPPl|}Ghzw!FRdk51L$^T{C$m%hFgK`RSrHl3`CM`tF1e83`~05Y z^FHtId2j8Z`3HIO8_B8U9%7iL>g^Au8$RXVLE+S1wt#kQ`Mmd;VV-UfIT$8 zpg85j!#BQxlK#dgrk}bD(X#+gMlNvxFCNF-NMv}8a;uM!2<7FxPPtzw_mjU8nqU=~ z2sq);ZQT6-S9sXZlxbwi*Tpfc8s4PQyVzHasct-TTk07<^<0oz6>|0Mr6?0(y=m!5 zNQutS=@UF`)ML!(M^MS9c}<=D0}5r8#%~x zOyrH*egU(V?7B#|%_IbFYDU&Ied=AU0OK?_^dcg}oZ?7Z8kr&xpZOYyQ+|=g9XKSv zG+lBLjsFY#sGfzg-dMG}%eN9&r%O%|A(~i*loC>1RZ+@C1P|riysjLZRe}%L5iVfkAhs7oArNL91lEBsc(>-7zFO?5QQ#8H!7Np6fi}f~poF-$ zYRVYN*r+OawoV)Wf_3zF<$!yLG0u|0TGQk)Ay83<8vfxUn``o-_- zc`lk!bj$Ty>()rlearVsT&?f5UTuPK#cYz|;5Bozrb@BEzQb){@dx%NzB4SQSd-Ke z#-ofES~GY&kT?C~S}>YBKK7?UGM3{P#~-k@EjWx%vDc)w6kZ8(@gFYC1x8;{DE^y$ zB5lP69IJY5rF4)ab+EaVCQBRIgu{`(G%BjrpfNaJRH;;9vF5H_(pAld2;s~mm6kO4 zQR#uaZmUyhP?Dx>q*h9(LQuu4pt3iKJSstV%IqYXJqlN+Jch~ye3f2iuQ&I_wHlh~ad{F8N!HAQMx0 zMWd-0$Zw{LvZXqbL>gKx^tMuqhDz{SWxNat=|%sRq{5mx { name: 'A', description: 'B', tokenPrefix: 'C', - collectionMode: 'rft', + collectionMode: 'nft', adminList: [callerCross], tokenPropertyPermissions: [ {key: 'key_0_0', permissions}, diff --git a/tests/src/eth/reFungible.test.ts b/tests/src/eth/reFungible.test.ts index abee1fe049..66620725a0 100644 --- a/tests/src/eth/reFungible.test.ts +++ b/tests/src/eth/reFungible.test.ts @@ -131,6 +131,8 @@ describe('Refungible: 
Plain calls', () => { const callerCross = helper.ethCrossAccount.fromAddress(caller); const receiver = helper.eth.createAccount(); const receiverCross = helper.ethCrossAccount.fromAddress(receiver); + const receiver2 = helper.eth.createAccount(); + const receiver2Cross = helper.ethCrossAccount.fromAddress(receiver2); const permissions = [ {code: TokenPermissionField.Mutable, value: true}, @@ -157,26 +159,41 @@ describe('Refungible: Plain calls', () => { }, ).send(); - const contract = await helper.ethNativeContract.collection(collectionAddress, 'nft', caller); + const contract = await helper.ethNativeContract.collection(collectionAddress, 'rft', caller); { const nextTokenId = await contract.methods.nextTokenId().call(); expect(nextTokenId).to.be.equal('1'); const result = await contract.methods.mintBulkCross([ { - owner: receiverCross, + owners: [{ + owner: receiverCross, + pieces: 1, + }], properties: [ {key: 'key_0_0', value: Buffer.from('value_0_0')}, ], }, { - owner: receiverCross, + owners: [{ + owner: receiverCross, + pieces: 2, + }], properties: [ {key: 'key_1_0', value: Buffer.from('value_1_0')}, {key: 'key_1_1', value: Buffer.from('value_1_1')}, ], }, { - owner: receiverCross, + owners: [ + { + owner: receiverCross, + pieces: 1, + }, + { + owner: receiver2Cross, + pieces: 2, + }, + ], properties: [ {key: 'key_2_0', value: Buffer.from('value_2_0')}, {key: 'key_2_1', value: Buffer.from('value_2_1')}, @@ -190,7 +207,11 @@ describe('Refungible: Plain calls', () => { const event = events[i]; expect(event.address).to.equal(collectionAddress); expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); - expect(event.returnValues.to).to.equal(receiver); + if(i == 0 || i == 1) + expect(event.returnValues.to).to.equal(receiver); + else + expect(event.returnValues.to).to.equal('0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF'); + expect(event.returnValues.tokenId).to.equal(`${+nextTokenId + i}`); } From 752e2b099754f07f31fb43e9ce73817d72215749 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Fri, 22 Sep 2023 10:30:28 +0000 Subject: [PATCH 031/143] fix: update weighting for mintBulkCross and forbid multiple owners + multipe tokens --- pallets/refungible/src/erc.rs | 14 +- tests/src/eth/reFungible.test.ts | 264 ++++++++++++++++++++++--------- 2 files changed, 204 insertions(+), 74 deletions(-) diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index aba39bfee7..51b1425482 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -1041,7 +1041,11 @@ where /// @notice Function to mint a token. 
/// @param tokenProperties Properties of minted token - #[weight(>::create_multiple_items(token_properties.len() as u32) + >::set_token_properties(token_properties.len() as u32))] + #[weight(if token_properties.len() == 1 { + >::create_multiple_items_ex_multiple_owners(token_properties.iter().next().unwrap().owners.len() as u32) + } else { + >::create_multiple_items_ex_multiple_items(token_properties.len() as u32) + } + >::set_token_properties(token_properties.len() as u32))] fn mint_bulk_cross( &mut self, caller: Caller, @@ -1051,9 +1055,17 @@ where let budget = self .recorder .weight_calls_budget(>::find_parent()); + let has_multiple_tokens = token_properties.len() > 1; let mut create_rft_data = Vec::with_capacity(token_properties.len()); for MintTokenData { owners, properties } in token_properties { + let has_multiple_owners = owners.len() > 1; + if has_multiple_tokens & has_multiple_owners { + return Err( + "creation of multiple tokens supported only if they have single owner each" + .into(), + ); + } let users: BoundedBTreeMap<_, _, _> = owners .into_iter() .map(|data| Ok((data.owner.into_sub_cross_account::()?, data.pieces))) diff --git a/tests/src/eth/reFungible.test.ts b/tests/src/eth/reFungible.test.ts index 66620725a0..5ed86047a9 100644 --- a/tests/src/eth/reFungible.test.ts +++ b/tests/src/eth/reFungible.test.ts @@ -126,13 +126,11 @@ describe('Refungible: Plain calls', () => { } }); - itEth('Can perform mintBulkCross()', async ({helper}) => { + itEth('Can perform mintBulkCross() with multiple tokens', async ({helper}) => { const caller = await helper.eth.createAccountWithBalance(donor); const callerCross = helper.ethCrossAccount.fromAddress(caller); const receiver = helper.eth.createAccount(); const receiverCross = helper.ethCrossAccount.fromAddress(receiver); - const receiver2 = helper.eth.createAccount(); - const receiver2Cross = helper.ethCrossAccount.fromAddress(receiver2); const permissions = [ {code: TokenPermissionField.Mutable, value: true}, @@ -160,81 +158,135 @@ describe('Refungible: Plain calls', () => { ).send(); const contract = await helper.ethNativeContract.collection(collectionAddress, 'rft', caller); - { - const nextTokenId = await contract.methods.nextTokenId().call(); - expect(nextTokenId).to.be.equal('1'); - const result = await contract.methods.mintBulkCross([ - { - owners: [{ - owner: receiverCross, - pieces: 1, - }], - properties: [ - {key: 'key_0_0', value: Buffer.from('value_0_0')}, - ], - }, + const nextTokenId = await contract.methods.nextTokenId().call(); + expect(nextTokenId).to.be.equal('1'); + const result = await contract.methods.mintBulkCross([ + { + owners: [{ + owner: receiverCross, + pieces: 1, + }], + properties: [ + {key: 'key_0_0', value: Buffer.from('value_0_0')}, + ], + }, + { + owners: [{ + owner: receiverCross, + pieces: 2, + }], + properties: [ + {key: 'key_1_0', value: Buffer.from('value_1_0')}, + {key: 'key_1_1', value: Buffer.from('value_1_1')}, + ], + }, + { + owners: [{ + owner: receiverCross, + pieces: 1, + }], + properties: [ + {key: 'key_2_0', value: Buffer.from('value_2_0')}, + {key: 'key_2_1', value: Buffer.from('value_2_1')}, + {key: 'key_2_2', value: Buffer.from('value_2_2')}, + ], + }, + ]).send({from: caller}); + const events = result.events.Transfer.sort((a: any, b: any) => +a.returnValues.tokenId - b.returnValues.tokenId); + const bulkSize = 3; + for(let i = 0; i < bulkSize; i++) { + const event = events[i]; + expect(event.address).to.equal(collectionAddress); + 
expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); + expect(event.returnValues.to).to.equal(receiver); + expect(event.returnValues.tokenId).to.equal(`${+nextTokenId + i}`); + } + + const properties = [ + await contract.methods.properties(+nextTokenId, []).call(), + await contract.methods.properties(+nextTokenId + 1, []).call(), + await contract.methods.properties(+nextTokenId + 2, []).call(), + ]; + expect(properties).to.be.deep.equal([ + [ + ['key_0_0', helper.getWeb3().utils.toHex('value_0_0')], + ], + [ + ['key_1_0', helper.getWeb3().utils.toHex('value_1_0')], + ['key_1_1', helper.getWeb3().utils.toHex('value_1_1')], + ], + [ + ['key_2_0', helper.getWeb3().utils.toHex('value_2_0')], + ['key_2_1', helper.getWeb3().utils.toHex('value_2_1')], + ['key_2_2', helper.getWeb3().utils.toHex('value_2_2')], + ], + ]); + }); + + itEth('Can perform mintBulkCross() with multiple owners', async ({helper}) => { + const caller = await helper.eth.createAccountWithBalance(donor); + const callerCross = helper.ethCrossAccount.fromAddress(caller); + const receiver = helper.eth.createAccount(); + const receiverCross = helper.ethCrossAccount.fromAddress(receiver); + const receiver2 = helper.eth.createAccount(); + const receiver2Cross = helper.ethCrossAccount.fromAddress(receiver2); + + const permissions = [ + {code: TokenPermissionField.Mutable, value: true}, + {code: TokenPermissionField.TokenOwner, value: true}, + {code: TokenPermissionField.CollectionAdmin, value: true}, + ]; + const {collectionAddress} = await helper.eth.createCollection( + caller, + { + ...CREATE_COLLECTION_DATA_DEFAULTS, + name: 'A', + description: 'B', + tokenPrefix: 'C', + collectionMode: 'rft', + adminList: [callerCross], + tokenPropertyPermissions: [ + {key: 'key_2_0', permissions}, + {key: 'key_2_1', permissions}, + {key: 'key_2_2', permissions}, + ], + }, + ).send(); + + const contract = await helper.ethNativeContract.collection(collectionAddress, 'rft', caller); + const nextTokenId = await contract.methods.nextTokenId().call(); + expect(nextTokenId).to.be.equal('1'); + const result = await contract.methods.mintBulkCross([{ + owners: [ { - owners: [{ - owner: receiverCross, - pieces: 2, - }], - properties: [ - {key: 'key_1_0', value: Buffer.from('value_1_0')}, - {key: 'key_1_1', value: Buffer.from('value_1_1')}, - ], + owner: receiverCross, + pieces: 1, }, { - owners: [ - { - owner: receiverCross, - pieces: 1, - }, - { - owner: receiver2Cross, - pieces: 2, - }, - ], - properties: [ - {key: 'key_2_0', value: Buffer.from('value_2_0')}, - {key: 'key_2_1', value: Buffer.from('value_2_1')}, - {key: 'key_2_2', value: Buffer.from('value_2_2')}, - ], + owner: receiver2Cross, + pieces: 2, }, - ]).send({from: caller}); - const events = result.events.Transfer.sort((a: any, b: any) => +a.returnValues.tokenId - b.returnValues.tokenId); - const bulkSize = 3; - for(let i = 0; i < bulkSize; i++) { - const event = events[i]; - expect(event.address).to.equal(collectionAddress); - expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); - if(i == 0 || i == 1) - expect(event.returnValues.to).to.equal(receiver); - else - expect(event.returnValues.to).to.equal('0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF'); - - expect(event.returnValues.tokenId).to.equal(`${+nextTokenId + i}`); - } + ], + properties: [ + {key: 'key_2_0', value: Buffer.from('value_2_0')}, + {key: 'key_2_1', value: Buffer.from('value_2_1')}, + {key: 'key_2_2', value: Buffer.from('value_2_2')}, + ], + }]).send({from: 
caller}); + const event = result.events.Transfer; + expect(event.address).to.equal(collectionAddress); + expect(event.returnValues.from).to.equal('0x0000000000000000000000000000000000000000'); + expect(event.returnValues.to).to.equal('0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF'); + expect(event.returnValues.tokenId).to.equal(`${+nextTokenId}`); - const properties = [ - await contract.methods.properties(+nextTokenId, []).call(), - await contract.methods.properties(+nextTokenId + 1, []).call(), - await contract.methods.properties(+nextTokenId + 2, []).call(), - ]; - expect(properties).to.be.deep.equal([ - [ - ['key_0_0', helper.getWeb3().utils.toHex('value_0_0')], - ], - [ - ['key_1_0', helper.getWeb3().utils.toHex('value_1_0')], - ['key_1_1', helper.getWeb3().utils.toHex('value_1_1')], - ], - [ - ['key_2_0', helper.getWeb3().utils.toHex('value_2_0')], - ['key_2_1', helper.getWeb3().utils.toHex('value_2_1')], - ['key_2_2', helper.getWeb3().utils.toHex('value_2_2')], - ], - ]); - } + const properties = [ + await contract.methods.properties(+nextTokenId, []).call(), + ]; + expect(properties).to.be.deep.equal([[ + ['key_2_0', helper.getWeb3().utils.toHex('value_2_0')], + ['key_2_1', helper.getWeb3().utils.toHex('value_2_1')], + ['key_2_2', helper.getWeb3().utils.toHex('value_2_2')], + ]]); }); itEth('Can perform setApprovalForAll()', async ({helper}) => { @@ -898,4 +950,70 @@ describe('Negative tests', () => { await expect(contract.methods.transferFromCross(ownerCross, recieverCross, token.tokenId).send({from: spender})).to.be.rejected; }); + + itEth('[negative] Can perform mintBulkCross() with multiple owners and multiple tokens', async ({helper}) => { + const caller = await helper.eth.createAccountWithBalance(donor); + const callerCross = helper.ethCrossAccount.fromAddress(caller); + const receiver = helper.eth.createAccount(); + const receiverCross = helper.ethCrossAccount.fromAddress(receiver); + const receiver2 = helper.eth.createAccount(); + const receiver2Cross = helper.ethCrossAccount.fromAddress(receiver2); + + const permissions = [ + {code: TokenPermissionField.Mutable, value: true}, + {code: TokenPermissionField.TokenOwner, value: true}, + {code: TokenPermissionField.CollectionAdmin, value: true}, + ]; + const {collectionAddress} = await helper.eth.createCollection( + caller, + { + ...CREATE_COLLECTION_DATA_DEFAULTS, + name: 'A', + description: 'B', + tokenPrefix: 'C', + collectionMode: 'rft', + adminList: [callerCross], + tokenPropertyPermissions: [ + {key: 'key_0_0', permissions}, + {key: 'key_2_0', permissions}, + {key: 'key_2_1', permissions}, + {key: 'key_2_2', permissions}, + ], + }, + ).send(); + + const contract = await helper.ethNativeContract.collection(collectionAddress, 'rft', caller); + const nextTokenId = await contract.methods.nextTokenId().call(); + expect(nextTokenId).to.be.equal('1'); + const createData = [ + { + owners: [{ + owner: receiverCross, + pieces: 1, + }], + properties: [ + {key: 'key_0_0', value: Buffer.from('value_0_0')}, + ], + }, + { + owners: [ + { + owner: receiverCross, + pieces: 1, + }, + { + owner: receiver2Cross, + pieces: 2, + }, + ], + properties: [ + {key: 'key_2_0', value: Buffer.from('value_2_0')}, + {key: 'key_2_1', value: Buffer.from('value_2_1')}, + {key: 'key_2_2', value: Buffer.from('value_2_2')}, + ], + }, + ]; + + await expect(contract.methods.mintBulkCross(createData).call({from: caller})).to.be.rejectedWith('creation of multiple tokens supported only if they have single owner each'); + }); }); From 
6486f2a91bb1c01df4fb2831e5f5b8772a1b6709 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 25 Sep 2023 10:42:36 +0200 Subject: [PATCH 032/143] style: fix typos (#998) * fix typo * fix typos * fix typos --- README.md | 2 +- doc/vesting.md | 4 ++-- migrations.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f85f5bac68..b0548b5332 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ Optional, full setup with Acala and Statemint ## Run Integration Tests -1. Install all needed dependecies +1. Install all needed dependencies ``` cd tests yarn install diff --git a/doc/vesting.md b/doc/vesting.md index 67b9cb8cdd..1c8ec3b9ca 100644 --- a/doc/vesting.md +++ b/doc/vesting.md @@ -7,7 +7,7 @@ In order to optimize storage used for chain state, as well as keep the rest of f ## VestedTransfer ### Description -This method transfers tokens to a vesting account with pre-defined vesting period. The token will not show up as owned by the recepient address, but will not be show as owned by the previous owner (sender) either. Instead, it will be shown as owned by a special value address - Vesting address (with unknown private key). After the vesting timestamp, token can be claimed using VestingClaim method, which will transfer the token to the recipient address. +This method transfers tokens to a vesting account with pre-defined vesting period. The token will not show up as owned by the recipient address, but will not be shown as owned by the previous owner (sender) either. Instead, it will be shown as owned by a special value address - Vesting address (with unknown private key). After the vesting timestamp, token can be claimed using VestingClaim method, which will transfer the token to the recipient address. ### Permissions * Collection Owner @@ -38,7 +38,7 @@ This method transfers tokens to a vesting account with pre-defined vesting perio ## VestedClaim ### Description -This method transfers tokens from the vesting account to the recipient, which was defined in VestingTrasnfer transaction. This method may be called by anyone, and it will only work after the vesting timestamp. +This method transfers tokens from the vesting account to the recipient, which was defined in VestingTransfer transaction. This method may be called by anyone, and it will only work after the vesting timestamp. 
### Permissions * Anyone diff --git a/migrations.md b/migrations.md index ac69c204bf..c9b74035ff 100644 --- a/migrations.md +++ b/migrations.md @@ -21,7 +21,7 @@ * Removed the previous migration of: * if the storage version is below 1, all collections from storage **CollectionById** of struct **Collection** version 1 to version 2, consisting of: * displacing _offchain_schema, variable_on_chain_schema, const_on_chain_schema, schema_version_ into _properties_ - * displacing _acccess, mint_mode_ into _permissions.access, permissions.mint_mode_ + * displacing _access, mint_mode_ into _permissions.access, permissions.mint_mode_ * adding _external_collection_ flag * Added unconditional bump of the storage version to 1 * Replaced returned weight `0` with `Weight::zero()` @@ -48,4 +48,4 @@ ### **pallet-unique:** * Removed the previous migration of: - * unconditional cleaning of all storage of **VariableMetaDataBasket** (cache for sponosoring setting deprecated variable metadata) + * unconditional cleaning of all storage of **VariableMetaDataBasket** (cache for sponsoring setting deprecated variable metadata) From 81bcea446d5b1a29b924cfe81f5b7372b7ae5abb Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 25 Sep 2023 16:56:56 +0200 Subject: [PATCH 033/143] fix: set token props only if something has changed --- pallets/common/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index efcca90026..a8e01c2829 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -1391,10 +1391,9 @@ impl Pallet { if changed { >::deposit_log(log); + set_token_properties(stored_properties); } - set_token_properties(stored_properties); - Ok(()) } From e3c90797812c0e1e14601b09cc446975943ad922 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 25 Sep 2023 16:57:58 +0200 Subject: [PATCH 034/143] fix: dont read storage during minting when not needed --- pallets/nonfungible/src/lib.rs | 18 +++++++++++++++--- pallets/refungible/src/lib.rs | 18 +++++++++++++++--- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index 5889431482..c61d9bf837 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -620,10 +620,22 @@ impl Pallet { Ok(is_owned) }); - let mut is_token_exist = - pallet_common::LazyValue::new(|| Self::token_exists(collection, token_id)); + let is_new_token = matches!(mode, SetPropertyMode::NewToken { .. }); - let stored_properties = >::get((collection.id, token_id)); + let mut is_token_exist = pallet_common::LazyValue::new(|| { + if is_new_token { + debug_assert!(Self::token_exists(collection, token_id)); + true + } else { + Self::token_exists(collection, token_id) + } + }); + + let stored_properties = if is_new_token { + TokenPropertiesT::new() + } else { + >::get((collection.id, token_id)) + }; >::modify_token_properties( collection, diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 0aab9cafd8..24f9ee1d71 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -563,10 +563,22 @@ impl Pallet { Ok(is_bundle_owner) }); - let mut is_token_exist = - pallet_common::LazyValue::new(|| Self::token_exists(collection, token_id)); + let is_new_token = matches!(mode, SetPropertyMode::NewToken { .. 
}); - let stored_properties = >::get((collection.id, token_id)); + let mut is_token_exist = pallet_common::LazyValue::new(|| { + if is_new_token { + debug_assert!(Self::token_exists(collection, token_id)); + true + } else { + Self::token_exists(collection, token_id) + } + }); + + let stored_properties = if is_new_token { + TokenPropertiesT::new() + } else { + >::get((collection.id, token_id)) + }; >::modify_token_properties( collection, From 9c8e0787fab16c26860fd0ac207539f5c5da17ae Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 25 Sep 2023 16:59:02 +0200 Subject: [PATCH 035/143] feat: add reset_token_properties benchmark --- pallets/nonfungible/src/benchmarking.rs | 22 ++++++++++++++++++++++ pallets/nonfungible/src/common.rs | 10 ++++++---- pallets/refungible/src/benchmarking.rs | 22 ++++++++++++++++++++++ pallets/refungible/src/common.rs | 2 +- 4 files changed, 51 insertions(+), 5 deletions(-) diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index ec582d4762..233c2399b7 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -200,6 +200,28 @@ benchmarks! { let item = create_max_item(&collection, &owner, owner.clone())?; }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?} + reset_token_properties { + let b in 0..MAX_PROPERTIES_PER_ITEM; + bench_init!{ + owner: sub; collection: collection(owner); + owner: cross_from_sub; + }; + let perms = (0..b).map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }).collect::>(); + >::set_token_property_permissions(&collection, &owner, perms)?; + let props = (0..b).map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }).collect::>(); + let item = create_max_item(&collection, &owner, owner.clone())?; + }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::NewToken { mint_target_is_sender: true }, &Unlimited)?} + delete_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; bench_init!{ diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index 9044e15fa6..bf2f2118d7 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -42,7 +42,9 @@ impl CommonWeightInfo for CommonWeights { + t.iter() .filter_map(|t| { if t.properties.len() > 0 { - Some(Self::set_token_properties(t.properties.len() as u32)) + Some(>::reset_token_properties( + t.properties.len() as u32, + )) } else { None } @@ -58,9 +60,9 @@ impl CommonWeightInfo for CommonWeights { + data .iter() .filter_map(|t| match t { - up_data_structs::CreateItemData::NFT(n) if n.properties.len() > 0 => { - Some(Self::set_token_properties(n.properties.len() as u32)) - } + up_data_structs::CreateItemData::NFT(n) if n.properties.len() > 0 => Some( + >::reset_token_properties(n.properties.len() as u32), + ), _ => None, }) .fold(Weight::zero(), |a, b| a.saturating_add(b)) diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 67efadaa05..8fe7629374 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -257,6 +257,28 @@ benchmarks! 
{ let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?} + reset_token_properties { + let b in 0..MAX_PROPERTIES_PER_ITEM; + bench_init!{ + owner: sub; collection: collection(owner); + owner: cross_from_sub; + }; + let perms = (0..b).map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }).collect::>(); + >::set_token_property_permissions(&collection, &owner, perms)?; + let props = (0..b).map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }).collect::>(); + let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::NewToken { mint_target_is_sender: true }, &Unlimited)?} + delete_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; bench_init!{ diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index a952a29df8..b94a8789e3 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -47,7 +47,7 @@ macro_rules! max_weight_of { fn properties_weight(properties: &CollectionPropertiesVec) -> Weight { if properties.len() > 0 { - >::set_token_properties(properties.len() as u32) + >::reset_token_properties(properties.len() as u32) } else { Weight::zero() } From d63678e90a0e9e3dfe53742e2381bc354a24ed97 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 25 Sep 2023 17:00:08 +0200 Subject: [PATCH 036/143] feat: add integration-tests profile and use in CI --- .docker/Dockerfile-chain-dev | 3 +-- .docker/Dockerfile-unique | 2 +- .docker/docker-compose.gov.j2 | 2 +- Cargo.toml | 4 ++++ 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.docker/Dockerfile-chain-dev b/.docker/Dockerfile-chain-dev index 2cfd83166c..a220ad8bbe 100644 --- a/.docker/Dockerfile-chain-dev +++ b/.docker/Dockerfile-chain-dev @@ -21,7 +21,6 @@ COPY . /dev_chain WORKDIR /dev_chain -RUN cargo build --release RUN echo "$NETWORK" -CMD cargo run --release --features=${NETWORK}-runtime -- --dev -linfo --rpc-cors=all --unsafe-rpc-external +CMD cargo run --profile integration-tests --features=${NETWORK}-runtime -- --dev -linfo --rpc-cors=all --unsafe-rpc-external diff --git a/.docker/Dockerfile-unique b/.docker/Dockerfile-unique index 494cc90909..2779413575 100644 --- a/.docker/Dockerfile-unique +++ b/.docker/Dockerfile-unique @@ -47,7 +47,7 @@ RUN --mount=type=cache,target=/cargo-home/registry \ --mount=type=cache,target=/unique_parachain/unique-chain/target \ cd unique-chain && \ echo "Using runtime features '$RUNTIME_FEATURES'" && \ - CARGO_INCREMENTAL=0 cargo build --release --features="$RUNTIME_FEATURES" --locked && \ + CARGO_INCREMENTAL=0 cargo build --profile integration-tests --features="$RUNTIME_FEATURES" --locked && \ mv ./target/release/unique-collator /unique_parachain/unique-chain/ && \ cd target/release/wbuild && find . 
-name "*.wasm" -exec sh -c 'mkdir -p "../../../wasm/$(dirname {})"; cp {} "../../../wasm/{}"' \; diff --git a/.docker/docker-compose.gov.j2 b/.docker/docker-compose.gov.j2 index df59ce2043..d08c0ef902 100644 --- a/.docker/docker-compose.gov.j2 +++ b/.docker/docker-compose.gov.j2 @@ -21,4 +21,4 @@ services: options: max-size: "1m" max-file: "3" - command: cargo run --release --features={{ NETWORK }}-runtime,gov-test-timings -- --dev -linfo --rpc-cors=all --unsafe-rpc-external + command: cargo run --profile integration-tests --features={{ NETWORK }}-runtime,gov-test-timings -- --dev -linfo --rpc-cors=all --unsafe-rpc-external diff --git a/Cargo.toml b/Cargo.toml index 5aeeb8bc53..898aa16eea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,10 @@ inherits = "release" lto = true opt-level = 3 +[profile.integration-tests] +inherits = "release" +debug-assertions = true + [workspace.dependencies] # Unique app-promotion-rpc = { path = "primitives/app_promotion_rpc", default-features = false } From 5ec8b824c3ad063259fc4a5bb7d1da026e970621 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 25 Sep 2023 17:00:49 +0200 Subject: [PATCH 037/143] chore: add temporary nft/rft weights --- pallets/nonfungible/src/weights.rs | 373 +++++++++++---------- pallets/refungible/src/weights.rs | 517 +++++++++++++++-------------- 2 files changed, 468 insertions(+), 422 deletions(-) diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index f00b840e3f..c4f2feedf5 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-25, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -46,6 +46,7 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; + fn reset_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn token_owner() -> Weight; fn set_allowance_for_all() -> Weight; @@ -60,8 +61,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:1) @@ -70,19 +69,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `63471` - // Minimum execution time: 25_892_000 picoseconds. - Weight::from_parts(26_424_000, 63471) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `456` + // Estimated: `20191` + // Minimum execution time: 10_660_000 picoseconds. + Weight::from_parts(10_960_000, 20191) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:200 w:200) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) @@ -92,24 +89,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `28192 + b * (35279 ±0)` - // Minimum execution time: 4_612_000 picoseconds. - Weight::from_parts(6_399_460, 28192) - // Standard Error: 5_119 - .saturating_add(Weight::from_parts(7_230_389, 0).saturating_mul(b.into())) + // Measured: `456` + // Estimated: `20191` + // Minimum execution time: 2_200_000 picoseconds. 
+ Weight::from_parts(612_529, 20191) + // Standard Error: 737 + .saturating_add(Weight::from_parts(3_807_625, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 35279).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:200 w:200) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:200 w:200) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) @@ -119,17 +112,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `25652 + b * (37819 ±0)` - // Minimum execution time: 4_538_000 picoseconds. - Weight::from_parts(4_686_000, 25652) - // Standard Error: 3_518 - .saturating_add(Weight::from_parts(8_905_771, 0).saturating_mul(b.into())) + // Measured: `456` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 2_190_000 picoseconds. + Weight::from_parts(2_280_000, 20191) + // Standard Error: 1_011 + .saturating_add(Weight::from_parts(4_931_681, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 37819).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } /// Storage: Nonfungible TokenData (r:1 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) @@ -148,9 +141,9 @@ impl WeightInfo for SubstrateWeight { fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `17561` - // Minimum execution time: 24_230_000 picoseconds. - Weight::from_parts(24_672_000, 17561) + // Estimated: `3530` + // Minimum execution time: 13_470_000 picoseconds. + Weight::from_parts(13_840_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -171,9 +164,9 @@ impl WeightInfo for SubstrateWeight { fn burn_recursively_self_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `17561` - // Minimum execution time: 30_521_000 picoseconds. - Weight::from_parts(31_241_000, 17561) + // Estimated: `3530` + // Minimum execution time: 16_940_000 picoseconds. 
+ Weight::from_parts(17_340_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -196,17 +189,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn burn_recursively_breadth_plus_self_plus_self_per_each_raw(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1467 + b * (58 ±0)` - // Estimated: `24230 + b * (10097 ±0)` - // Minimum execution time: 31_734_000 picoseconds. - Weight::from_parts(32_162_000, 24230) - // Standard Error: 210_514 - .saturating_add(Weight::from_parts(71_382_804, 0).saturating_mul(b.into())) + // Measured: `1500 + b * (58 ±0)` + // Estimated: `5874 + b * (5032 ±0)` + // Minimum execution time: 16_910_000 picoseconds. + Weight::from_parts(17_170_000, 5874) + // Standard Error: 102_760 + .saturating_add(Weight::from_parts(45_644_966, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 10097).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 5032).saturating_mul(b.into())) } /// Storage: Nonfungible TokenData (r:1 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) @@ -219,9 +212,9 @@ impl WeightInfo for SubstrateWeight { fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `13114` - // Minimum execution time: 18_305_000 picoseconds. - Weight::from_parts(18_859_000, 13114) + // Estimated: `6070` + // Minimum execution time: 9_420_000 picoseconds. + Weight::from_parts(9_710_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -232,9 +225,9 @@ impl WeightInfo for SubstrateWeight { fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `326` - // Estimated: `7044` - // Minimum execution time: 10_977_000 picoseconds. - Weight::from_parts(11_184_000, 7044) + // Estimated: `3522` + // Minimum execution time: 5_400_000 picoseconds. + Weight::from_parts(5_640_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,9 +238,9 @@ impl WeightInfo for SubstrateWeight { fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `313` - // Estimated: `7044` - // Minimum execution time: 11_456_000 picoseconds. - Weight::from_parts(11_731_000, 7044) + // Estimated: `3522` + // Minimum execution time: 5_390_000 picoseconds. + Weight::from_parts(5_610_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -257,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 5_771_000 picoseconds. - Weight::from_parts(5_972_000, 3522) + // Minimum execution time: 2_970_000 picoseconds. + Weight::from_parts(3_170_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -278,9 +271,9 @@ impl WeightInfo for SubstrateWeight { fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `463` - // Estimated: `17561` - // Minimum execution time: 30_633_000 picoseconds. 
- Weight::from_parts(31_136_000, 17561) + // Estimated: `3530` + // Minimum execution time: 15_400_000 picoseconds. + Weight::from_parts(15_850_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -289,12 +282,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 64]`. fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_300_000 picoseconds. - Weight::from_parts(2_382_000, 20191) - // Standard Error: 45_076 - .saturating_add(Weight::from_parts(12_000_777, 0).saturating_mul(b.into())) + // Minimum execution time: 780_000 picoseconds. + Weight::from_parts(830_000, 20191) + // Standard Error: 35_354 + .saturating_add(Weight::from_parts(8_422_695, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,32 +295,52 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenData (r:1 w:0) + /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn set_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + b * (261 ±0)` - // Estimated: `56460` - // Minimum execution time: 12_422_000 picoseconds. - Weight::from_parts(5_523_689, 56460) - // Standard Error: 74_137 - .saturating_add(Weight::from_parts(6_320_501, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `640 + b * (261 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_530_000 picoseconds. + Weight::from_parts(10_829_898, 36269) + // Standard Error: 5_106 + .saturating_add(Weight::from_parts(2_601_900, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:0 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// The range of component `b` is `[0, 64]`. + fn reset_token_properties(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `317 + b * (261 ±0)` + // Estimated: `20191` + // Minimum execution time: 3_690_000 picoseconds. 
+ Weight::from_parts(9_633_879, 20191) + // Standard Error: 6_573 + .saturating_add(Weight::from_parts(2_694_801, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenData (r:1 w:0) + /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589 + b * (33291 ±0)` - // Estimated: `56460` - // Minimum execution time: 12_006_000 picoseconds. - Weight::from_parts(12_216_000, 56460) - // Standard Error: 83_431 - .saturating_add(Weight::from_parts(24_556_999, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `699 + b * (33291 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_860_000 picoseconds. + Weight::from_parts(6_000_000, 36269) + // Standard Error: 26_204 + .saturating_add(Weight::from_parts(9_601_645, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible TokenData (r:1 w:0) @@ -336,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_827_000 picoseconds. - Weight::from_parts(4_984_000, 3522) + // Minimum execution time: 2_930_000 picoseconds. + Weight::from_parts(3_060_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -346,28 +359,28 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_151_000 picoseconds. - Weight::from_parts(6_394_000, 0) + // Minimum execution time: 2_680_000 picoseconds. + Weight::from_parts(2_780_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_791_000 picoseconds. - Weight::from_parts(3_950_000, 3576) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_110_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn repair_item() -> Weight { // Proof Size summary in bytes: - // Measured: `300` + // Measured: `279` // Estimated: `36269` - // Minimum execution time: 5_364_000 picoseconds. - Weight::from_parts(5_539_000, 36269) + // Minimum execution time: 2_200_000 picoseconds. 
+ Weight::from_parts(2_340_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -379,8 +392,6 @@ impl WeightInfo for () { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:1) @@ -389,19 +400,17 @@ impl WeightInfo for () { /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `63471` - // Minimum execution time: 25_892_000 picoseconds. - Weight::from_parts(26_424_000, 63471) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `456` + // Estimated: `20191` + // Minimum execution time: 10_660_000 picoseconds. + Weight::from_parts(10_960_000, 20191) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:200 w:200) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) @@ -411,24 +420,20 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `28192 + b * (35279 ±0)` - // Minimum execution time: 4_612_000 picoseconds. - Weight::from_parts(6_399_460, 28192) - // Standard Error: 5_119 - .saturating_add(Weight::from_parts(7_230_389, 0).saturating_mul(b.into())) + // Measured: `456` + // Estimated: `20191` + // Minimum execution time: 2_200_000 picoseconds. 
+ Weight::from_parts(612_529, 20191) + // Standard Error: 737 + .saturating_add(Weight::from_parts(3_807_625, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 35279).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:200 w:200) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:200 w:200) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) @@ -438,17 +443,17 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `390` - // Estimated: `25652 + b * (37819 ±0)` - // Minimum execution time: 4_538_000 picoseconds. - Weight::from_parts(4_686_000, 25652) - // Standard Error: 3_518 - .saturating_add(Weight::from_parts(8_905_771, 0).saturating_mul(b.into())) + // Measured: `456` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 2_190_000 picoseconds. + Weight::from_parts(2_280_000, 20191) + // Standard Error: 1_011 + .saturating_add(Weight::from_parts(4_931_681, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 37819).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } /// Storage: Nonfungible TokenData (r:1 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) @@ -467,9 +472,9 @@ impl WeightInfo for () { fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `17561` - // Minimum execution time: 24_230_000 picoseconds. - Weight::from_parts(24_672_000, 17561) + // Estimated: `3530` + // Minimum execution time: 13_470_000 picoseconds. + Weight::from_parts(13_840_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -490,9 +495,9 @@ impl WeightInfo for () { fn burn_recursively_self_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `17561` - // Minimum execution time: 30_521_000 picoseconds. - Weight::from_parts(31_241_000, 17561) + // Estimated: `3530` + // Minimum execution time: 16_940_000 picoseconds. 
+ Weight::from_parts(17_340_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -515,17 +520,17 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn burn_recursively_breadth_plus_self_plus_self_per_each_raw(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1467 + b * (58 ±0)` - // Estimated: `24230 + b * (10097 ±0)` - // Minimum execution time: 31_734_000 picoseconds. - Weight::from_parts(32_162_000, 24230) - // Standard Error: 210_514 - .saturating_add(Weight::from_parts(71_382_804, 0).saturating_mul(b.into())) + // Measured: `1500 + b * (58 ±0)` + // Estimated: `5874 + b * (5032 ±0)` + // Minimum execution time: 16_910_000 picoseconds. + Weight::from_parts(17_170_000, 5874) + // Standard Error: 102_760 + .saturating_add(Weight::from_parts(45_644_966, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 10097).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 5032).saturating_mul(b.into())) } /// Storage: Nonfungible TokenData (r:1 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) @@ -538,9 +543,9 @@ impl WeightInfo for () { fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` - // Estimated: `13114` - // Minimum execution time: 18_305_000 picoseconds. - Weight::from_parts(18_859_000, 13114) + // Estimated: `6070` + // Minimum execution time: 9_420_000 picoseconds. + Weight::from_parts(9_710_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -551,9 +556,9 @@ impl WeightInfo for () { fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `326` - // Estimated: `7044` - // Minimum execution time: 10_977_000 picoseconds. - Weight::from_parts(11_184_000, 7044) + // Estimated: `3522` + // Minimum execution time: 5_400_000 picoseconds. + Weight::from_parts(5_640_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -564,9 +569,9 @@ impl WeightInfo for () { fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `313` - // Estimated: `7044` - // Minimum execution time: 11_456_000 picoseconds. - Weight::from_parts(11_731_000, 7044) + // Estimated: `3522` + // Minimum execution time: 5_390_000 picoseconds. + Weight::from_parts(5_610_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -576,8 +581,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 5_771_000 picoseconds. - Weight::from_parts(5_972_000, 3522) + // Minimum execution time: 2_970_000 picoseconds. + Weight::from_parts(3_170_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -597,9 +602,9 @@ impl WeightInfo for () { fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `463` - // Estimated: `17561` - // Minimum execution time: 30_633_000 picoseconds. - Weight::from_parts(31_136_000, 17561) + // Estimated: `3530` + // Minimum execution time: 15_400_000 picoseconds. 
+ Weight::from_parts(15_850_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -608,12 +613,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 64]`. fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_300_000 picoseconds. - Weight::from_parts(2_382_000, 20191) - // Standard Error: 45_076 - .saturating_add(Weight::from_parts(12_000_777, 0).saturating_mul(b.into())) + // Minimum execution time: 780_000 picoseconds. + Weight::from_parts(830_000, 20191) + // Standard Error: 35_354 + .saturating_add(Weight::from_parts(8_422_695, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -621,32 +626,52 @@ impl WeightInfo for () { /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenData (r:1 w:0) + /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn set_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + b * (261 ±0)` - // Estimated: `56460` - // Minimum execution time: 12_422_000 picoseconds. - Weight::from_parts(5_523_689, 56460) - // Standard Error: 74_137 - .saturating_add(Weight::from_parts(6_320_501, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `640 + b * (261 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_530_000 picoseconds. + Weight::from_parts(10_829_898, 36269) + // Standard Error: 5_106 + .saturating_add(Weight::from_parts(2_601_900, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:0 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// The range of component `b` is `[0, 64]`. + fn reset_token_properties(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `317 + b * (261 ±0)` + // Estimated: `20191` + // Minimum execution time: 3_690_000 picoseconds. 
+ Weight::from_parts(9_633_879, 20191) + // Standard Error: 6_573 + .saturating_add(Weight::from_parts(2_694_801, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenData (r:1 w:0) + /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589 + b * (33291 ±0)` - // Estimated: `56460` - // Minimum execution time: 12_006_000 picoseconds. - Weight::from_parts(12_216_000, 56460) - // Standard Error: 83_431 - .saturating_add(Weight::from_parts(24_556_999, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `699 + b * (33291 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_860_000 picoseconds. + Weight::from_parts(6_000_000, 36269) + // Standard Error: 26_204 + .saturating_add(Weight::from_parts(9_601_645, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible TokenData (r:1 w:0) @@ -655,8 +680,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_827_000 picoseconds. - Weight::from_parts(4_984_000, 3522) + // Minimum execution time: 2_930_000 picoseconds. + Weight::from_parts(3_060_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -665,28 +690,28 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_151_000 picoseconds. - Weight::from_parts(6_394_000, 0) + // Minimum execution time: 2_680_000 picoseconds. + Weight::from_parts(2_780_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_791_000 picoseconds. - Weight::from_parts(3_950_000, 3576) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_110_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn repair_item() -> Weight { // Proof Size summary in bytes: - // Measured: `300` + // Measured: `279` // Estimated: `36269` - // Minimum execution time: 5_364_000 picoseconds. - Weight::from_parts(5_539_000, 36269) + // Minimum execution time: 2_200_000 picoseconds. 
+ Weight::from_parts(2_340_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index 58ea241ab5..c3f1621fd3 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-25, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -52,6 +52,7 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; + fn reset_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn repartition_item() -> Weight; fn token_owner() -> Weight; @@ -67,8 +68,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:1) @@ -79,19 +78,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `63471` - // Minimum execution time: 30_759_000 picoseconds. - Weight::from_parts(31_321_000, 63471) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Measured: `318` + // Estimated: `20191` + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_110_000, 20191) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:200 w:200) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -103,24 +100,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. 
fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `28192 + b * (35279 ±0)` - // Minimum execution time: 4_024_000 picoseconds. - Weight::from_parts(4_145_000, 28192) - // Standard Error: 3_332 - .saturating_add(Weight::from_parts(8_967_757, 0).saturating_mul(b.into())) + // Measured: `318` + // Estimated: `20191` + // Minimum execution time: 1_690_000 picoseconds. + Weight::from_parts(1_750_000, 20191) + // Standard Error: 1_647 + .saturating_add(Weight::from_parts(4_876_194, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 35279).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:200 w:200) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -132,24 +125,22 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `25652 + b * (37819 ±0)` - // Minimum execution time: 3_715_000 picoseconds. - Weight::from_parts(3_881_000, 25652) - // Standard Error: 3_275 - .saturating_add(Weight::from_parts(10_525_271, 0).saturating_mul(b.into())) + // Measured: `318` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 1_730_000 picoseconds. 
+ Weight::from_parts(1_790_000, 20191) + // Standard Error: 1_611 + .saturating_add(Weight::from_parts(6_002_948, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 37819).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -161,15 +152,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `60931 + b * (2540 ±0)` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(15_655_930, 60931) - // Standard Error: 4_170 - .saturating_add(Weight::from_parts(5_673_702, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `318` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 6_260_000 picoseconds. + Weight::from_parts(2_297_892, 20191) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(4_093_180, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } @@ -183,10 +174,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn burn_item_partial() -> Weight { // Proof Size summary in bytes: - // Measured: `490` - // Estimated: `15717` - // Minimum execution time: 28_992_000 picoseconds. - Weight::from_parts(29_325_000, 15717) + // Measured: `456` + // Estimated: `8682` + // Minimum execution time: 17_330_000 picoseconds. + Weight::from_parts(17_910_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -204,10 +195,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn burn_item_fully() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `14070` - // Minimum execution time: 27_980_000 picoseconds. 
- Weight::from_parts(28_582_000, 14070) + // Measured: `341` + // Estimated: `3554` + // Minimum execution time: 16_920_000 picoseconds. + Weight::from_parts(17_630_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -217,10 +208,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) fn transfer_normal() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `9623` - // Minimum execution time: 18_746_000 picoseconds. - Weight::from_parts(19_096_000, 9623) + // Measured: `365` + // Estimated: `6118` + // Minimum execution time: 10_600_000 picoseconds. + Weight::from_parts(10_950_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,10 +225,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `13153` - // Minimum execution time: 21_719_000 picoseconds. - Weight::from_parts(22_219_000, 13153) + // Measured: `341` + // Estimated: `6118` + // Minimum execution time: 12_160_000 picoseconds. + Weight::from_parts(12_570_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -251,10 +242,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `490` - // Estimated: `13153` - // Minimum execution time: 24_784_000 picoseconds. - Weight::from_parts(25_231_000, 13153) + // Measured: `456` + // Estimated: `6118` + // Minimum execution time: 14_300_000 picoseconds. + Weight::from_parts(14_760_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -268,10 +259,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_creating_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `15693` - // Minimum execution time: 24_865_000 picoseconds. - Weight::from_parts(25_253_000, 15693) + // Measured: `341` + // Estimated: `6118` + // Minimum execution time: 14_050_000 picoseconds. + Weight::from_parts(14_490_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -281,10 +272,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `256` + // Measured: `223` // Estimated: `3554` - // Minimum execution time: 12_318_000 picoseconds. - Weight::from_parts(12_597_000, 3554) + // Minimum execution time: 6_210_000 picoseconds. 
+ Weight::from_parts(6_500_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,10 +285,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) fn approve_from() -> Weight { // Proof Size summary in bytes: - // Measured: `244` + // Measured: `211` // Estimated: `3554` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(12_557_000, 3554) + // Minimum execution time: 6_280_000 picoseconds. + Weight::from_parts(6_540_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -309,10 +300,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) fn transfer_from_normal() -> Weight { // Proof Size summary in bytes: - // Measured: `528` - // Estimated: `13193` - // Minimum execution time: 26_852_000 picoseconds. - Weight::from_parts(27_427_000, 13193) + // Measured: `495` + // Estimated: `6118` + // Minimum execution time: 15_570_000 picoseconds. + Weight::from_parts(15_990_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -328,10 +319,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `16723` - // Minimum execution time: 29_893_000 picoseconds. - Weight::from_parts(30_345_000, 16723) + // Measured: `471` + // Estimated: `6118` + // Minimum execution time: 17_030_000 picoseconds. + Weight::from_parts(17_430_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -347,10 +338,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `620` - // Estimated: `16723` - // Minimum execution time: 32_784_000 picoseconds. - Weight::from_parts(33_322_000, 16723) + // Measured: `586` + // Estimated: `6118` + // Minimum execution time: 19_300_000 picoseconds. + Weight::from_parts(19_760_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -366,10 +357,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_creating_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `19263` - // Minimum execution time: 32_987_000 picoseconds. - Weight::from_parts(33_428_000, 19263) + // Measured: `471` + // Estimated: `6118` + // Minimum execution time: 19_190_000 picoseconds. + Weight::from_parts(19_560_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -389,10 +380,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn burn_from() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `17640` - // Minimum execution time: 38_277_000 picoseconds. 
- Weight::from_parts(38_983_000, 17640) + // Measured: `471` + // Estimated: `3570` + // Minimum execution time: 22_590_000 picoseconds. + Weight::from_parts(23_290_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -401,12 +392,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 64]`. fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_335_000, 20191) - // Standard Error: 44_906 - .saturating_add(Weight::from_parts(12_118_499, 0).saturating_mul(b.into())) + // Minimum execution time: 780_000 picoseconds. + Weight::from_parts(850_000, 20191) + // Standard Error: 38_209 + .saturating_add(Weight::from_parts(9_042_866, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -414,32 +405,52 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TotalSupply (r:1 w:0) + /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn set_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `458 + b * (261 ±0)` - // Estimated: `56460` - // Minimum execution time: 11_249_000 picoseconds. - Weight::from_parts(11_420_000, 56460) - // Standard Error: 72_033 - .saturating_add(Weight::from_parts(7_008_012, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `502 + b * (261 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_160_000 picoseconds. + Weight::from_parts(15_283_437, 36269) + // Standard Error: 12_988 + .saturating_add(Weight::from_parts(2_757_274, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:0 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// The range of component `b` is `[0, 64]`. + fn reset_token_properties(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `317 + b * (261 ±0)` + // Estimated: `20191` + // Minimum execution time: 3_560_000 picoseconds. 
+ Weight::from_parts(6_788_279, 20191) + // Standard Error: 5_443 + .saturating_add(Weight::from_parts(2_641_825, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TotalSupply (r:1 w:0) + /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `463 + b * (33291 ±0)` - // Estimated: `56460` - // Minimum execution time: 11_368_000 picoseconds. - Weight::from_parts(11_546_000, 56460) - // Standard Error: 85_444 - .saturating_add(Weight::from_parts(24_644_980, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `561 + b * (33291 ±0)` + // Estimated: `36269` + // Minimum execution time: 4_650_000 picoseconds. + Weight::from_parts(4_820_000, 36269) + // Standard Error: 24_035 + .saturating_add(Weight::from_parts(8_828_039, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible TotalSupply (r:1 w:1) @@ -448,10 +459,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn repartition_item() -> Weight { // Proof Size summary in bytes: - // Measured: `321` - // Estimated: `7059` - // Minimum execution time: 13_586_000 picoseconds. - Weight::from_parts(14_489_000, 7059) + // Measured: `288` + // Estimated: `3554` + // Minimum execution time: 7_260_000 picoseconds. + Weight::from_parts(7_520_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -459,10 +470,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn token_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `321` + // Measured: `288` // Estimated: `6118` - // Minimum execution time: 7_049_000 picoseconds. - Weight::from_parts(7_320_000, 6118) + // Minimum execution time: 3_200_000 picoseconds. + Weight::from_parts(3_360_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -471,8 +482,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_432_000 picoseconds. - Weight::from_parts(6_642_000, 0) + // Minimum execution time: 2_420_000 picoseconds. + Weight::from_parts(2_540_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -481,18 +492,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 3_030_000 picoseconds. - Weight::from_parts(3_206_000, 3576) + // Minimum execution time: 1_370_000 picoseconds. 
+ Weight::from_parts(1_470_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn repair_item() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `120` // Estimated: `36269` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_555_000, 36269) + // Minimum execution time: 1_350_000 picoseconds. + Weight::from_parts(1_420_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -504,8 +515,6 @@ impl WeightInfo for () { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:1) @@ -516,19 +525,17 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `63471` - // Minimum execution time: 30_759_000 picoseconds. - Weight::from_parts(31_321_000, 63471) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Measured: `318` + // Estimated: `20191` + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_110_000, 20191) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:200 w:200) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -540,24 +547,20 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `28192 + b * (35279 ±0)` - // Minimum execution time: 4_024_000 picoseconds. - Weight::from_parts(4_145_000, 28192) - // Standard Error: 3_332 - .saturating_add(Weight::from_parts(8_967_757, 0).saturating_mul(b.into())) + // Measured: `318` + // Estimated: `20191` + // Minimum execution time: 1_690_000 picoseconds. 
+ Weight::from_parts(1_750_000, 20191) + // Standard Error: 1_647 + .saturating_add(Weight::from_parts(4_876_194, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 35279).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:200 w:200) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -569,24 +572,22 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `25652 + b * (37819 ±0)` - // Minimum execution time: 3_715_000 picoseconds. - Weight::from_parts(3_881_000, 25652) - // Standard Error: 3_275 - .saturating_add(Weight::from_parts(10_525_271, 0).saturating_mul(b.into())) + // Measured: `318` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 1_730_000 picoseconds. + Weight::from_parts(1_790_000, 20191) + // Standard Error: 1_611 + .saturating_add(Weight::from_parts(6_002_948, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 37819).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) @@ -598,15 +599,15 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. 
fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `60931 + b * (2540 ±0)` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(15_655_930, 60931) - // Standard Error: 4_170 - .saturating_add(Weight::from_parts(5_673_702, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `318` + // Estimated: `20191 + b * (2540 ±0)` + // Minimum execution time: 6_260_000 picoseconds. + Weight::from_parts(2_297_892, 20191) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(4_093_180, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } @@ -620,10 +621,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn burn_item_partial() -> Weight { // Proof Size summary in bytes: - // Measured: `490` - // Estimated: `15717` - // Minimum execution time: 28_992_000 picoseconds. - Weight::from_parts(29_325_000, 15717) + // Measured: `456` + // Estimated: `8682` + // Minimum execution time: 17_330_000 picoseconds. + Weight::from_parts(17_910_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -641,10 +642,10 @@ impl WeightInfo for () { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn burn_item_fully() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `14070` - // Minimum execution time: 27_980_000 picoseconds. - Weight::from_parts(28_582_000, 14070) + // Measured: `341` + // Estimated: `3554` + // Minimum execution time: 16_920_000 picoseconds. + Weight::from_parts(17_630_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -654,10 +655,10 @@ impl WeightInfo for () { /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) fn transfer_normal() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `9623` - // Minimum execution time: 18_746_000 picoseconds. - Weight::from_parts(19_096_000, 9623) + // Measured: `365` + // Estimated: `6118` + // Minimum execution time: 10_600_000 picoseconds. + Weight::from_parts(10_950_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -671,10 +672,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `13153` - // Minimum execution time: 21_719_000 picoseconds. - Weight::from_parts(22_219_000, 13153) + // Measured: `341` + // Estimated: `6118` + // Minimum execution time: 12_160_000 picoseconds. 
+ Weight::from_parts(12_570_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -688,10 +689,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `490` - // Estimated: `13153` - // Minimum execution time: 24_784_000 picoseconds. - Weight::from_parts(25_231_000, 13153) + // Measured: `456` + // Estimated: `6118` + // Minimum execution time: 14_300_000 picoseconds. + Weight::from_parts(14_760_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -705,10 +706,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_creating_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `375` - // Estimated: `15693` - // Minimum execution time: 24_865_000 picoseconds. - Weight::from_parts(25_253_000, 15693) + // Measured: `341` + // Estimated: `6118` + // Minimum execution time: 14_050_000 picoseconds. + Weight::from_parts(14_490_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -718,10 +719,10 @@ impl WeightInfo for () { /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) fn approve() -> Weight { // Proof Size summary in bytes: - // Measured: `256` + // Measured: `223` // Estimated: `3554` - // Minimum execution time: 12_318_000 picoseconds. - Weight::from_parts(12_597_000, 3554) + // Minimum execution time: 6_210_000 picoseconds. + Weight::from_parts(6_500_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -731,10 +732,10 @@ impl WeightInfo for () { /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) fn approve_from() -> Weight { // Proof Size summary in bytes: - // Measured: `244` + // Measured: `211` // Estimated: `3554` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(12_557_000, 3554) + // Minimum execution time: 6_280_000 picoseconds. + Weight::from_parts(6_540_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,10 +747,10 @@ impl WeightInfo for () { /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) fn transfer_from_normal() -> Weight { // Proof Size summary in bytes: - // Measured: `528` - // Estimated: `13193` - // Minimum execution time: 26_852_000 picoseconds. - Weight::from_parts(27_427_000, 13193) + // Measured: `495` + // Estimated: `6118` + // Minimum execution time: 15_570_000 picoseconds. + Weight::from_parts(15_990_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -765,10 +766,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `16723` - // Minimum execution time: 29_893_000 picoseconds. - Weight::from_parts(30_345_000, 16723) + // Measured: `471` + // Estimated: `6118` + // Minimum execution time: 17_030_000 picoseconds. 
+ Weight::from_parts(17_430_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -784,10 +785,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `620` - // Estimated: `16723` - // Minimum execution time: 32_784_000 picoseconds. - Weight::from_parts(33_322_000, 16723) + // Measured: `586` + // Estimated: `6118` + // Minimum execution time: 19_300_000 picoseconds. + Weight::from_parts(19_760_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -803,10 +804,10 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn transfer_from_creating_removing() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `19263` - // Minimum execution time: 32_987_000 picoseconds. - Weight::from_parts(33_428_000, 19263) + // Measured: `471` + // Estimated: `6118` + // Minimum execution time: 19_190_000 picoseconds. + Weight::from_parts(19_560_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -826,10 +827,10 @@ impl WeightInfo for () { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn burn_from() -> Weight { // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `17640` - // Minimum execution time: 38_277_000 picoseconds. - Weight::from_parts(38_983_000, 17640) + // Measured: `471` + // Estimated: `3570` + // Minimum execution time: 22_590_000 picoseconds. + Weight::from_parts(23_290_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -838,12 +839,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 64]`. fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_335_000, 20191) - // Standard Error: 44_906 - .saturating_add(Weight::from_parts(12_118_499, 0).saturating_mul(b.into())) + // Minimum execution time: 780_000 picoseconds. + Weight::from_parts(850_000, 20191) + // Standard Error: 38_209 + .saturating_add(Weight::from_parts(9_042_866, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -851,32 +852,52 @@ impl WeightInfo for () { /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TotalSupply (r:1 w:0) + /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn set_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `458 + b * (261 ±0)` - // Estimated: `56460` - // Minimum execution time: 11_249_000 picoseconds. 
- Weight::from_parts(11_420_000, 56460) - // Standard Error: 72_033 - .saturating_add(Weight::from_parts(7_008_012, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `502 + b * (261 ±0)` + // Estimated: `36269` + // Minimum execution time: 5_160_000 picoseconds. + Weight::from_parts(15_283_437, 36269) + // Standard Error: 12_988 + .saturating_add(Weight::from_parts(2_757_274, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:0 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// The range of component `b` is `[0, 64]`. + fn reset_token_properties(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `317 + b * (261 ±0)` + // Estimated: `20191` + // Minimum execution time: 3_560_000 picoseconds. + Weight::from_parts(6_788_279, 20191) + // Standard Error: 5_443 + .saturating_add(Weight::from_parts(2_641_825, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TotalSupply (r:1 w:0) + /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `463 + b * (33291 ±0)` - // Estimated: `56460` - // Minimum execution time: 11_368_000 picoseconds. - Weight::from_parts(11_546_000, 56460) - // Standard Error: 85_444 - .saturating_add(Weight::from_parts(24_644_980, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `561 + b * (33291 ±0)` + // Estimated: `36269` + // Minimum execution time: 4_650_000 picoseconds. + Weight::from_parts(4_820_000, 36269) + // Standard Error: 24_035 + .saturating_add(Weight::from_parts(8_828_039, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible TotalSupply (r:1 w:1) @@ -885,10 +906,10 @@ impl WeightInfo for () { /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn repartition_item() -> Weight { // Proof Size summary in bytes: - // Measured: `321` - // Estimated: `7059` - // Minimum execution time: 13_586_000 picoseconds. - Weight::from_parts(14_489_000, 7059) + // Measured: `288` + // Estimated: `3554` + // Minimum execution time: 7_260_000 picoseconds. 
+ Weight::from_parts(7_520_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -896,10 +917,10 @@ impl WeightInfo for () { /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) fn token_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `321` + // Measured: `288` // Estimated: `6118` - // Minimum execution time: 7_049_000 picoseconds. - Weight::from_parts(7_320_000, 6118) + // Minimum execution time: 3_200_000 picoseconds. + Weight::from_parts(3_360_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -908,8 +929,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_432_000 picoseconds. - Weight::from_parts(6_642_000, 0) + // Minimum execution time: 2_420_000 picoseconds. + Weight::from_parts(2_540_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -918,18 +939,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 3_030_000 picoseconds. - Weight::from_parts(3_206_000, 3576) + // Minimum execution time: 1_370_000 picoseconds. + Weight::from_parts(1_470_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) fn repair_item() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `120` // Estimated: `36269` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_555_000, 36269) + // Minimum execution time: 1_350_000 picoseconds. + Weight::from_parts(1_420_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } From 331f0fad33265fefe1c618e781c55092ed62e69b Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 11:54:52 +0200 Subject: [PATCH 038/143] fix: cache chain build in CI --- .docker/Dockerfile-chain-dev | 1 + 1 file changed, 1 insertion(+) diff --git a/.docker/Dockerfile-chain-dev b/.docker/Dockerfile-chain-dev index a220ad8bbe..e18b7e0330 100644 --- a/.docker/Dockerfile-chain-dev +++ b/.docker/Dockerfile-chain-dev @@ -21,6 +21,7 @@ COPY . 
/dev_chain WORKDIR /dev_chain +RUN cargo build --profile integration-tests --features=${NETWORK}-runtime RUN echo "$NETWORK" CMD cargo run --profile integration-tests --features=${NETWORK}-runtime -- --dev -linfo --rpc-cors=all --unsafe-rpc-external From 192f49d61f1a9393132329bc91718b98c107a094 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 12:10:11 +0200 Subject: [PATCH 039/143] fix: dont read tpp when not needed --- pallets/common/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index a8e01c2829..d182b44fbb 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -1342,11 +1342,12 @@ impl Pallet { FTE: FnOnce() -> bool, { let mut is_collection_admin = LazyValue::new(|| collection.is_owner_or_admin(sender)); - let permissions = Self::property_permissions(collection.id); + let mut permissions = LazyValue::new(|| Self::property_permissions(collection.id)); let mut changed = false; for (key, value) in properties_updates { let permission = permissions + .value() .get(&key) .cloned() .unwrap_or_else(PropertyPermission::none); From 9b4620c8952fceac9c227e9f50a8659d334abf84 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 12:28:23 +0200 Subject: [PATCH 040/143] chore: repeat temporary nft/rft benches --- pallets/nonfungible/src/weights.rs | 258 +++++++++++----------- pallets/refungible/src/weights.rs | 330 ++++++++++++++--------------- 2 files changed, 280 insertions(+), 308 deletions(-) diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index c4f2feedf5..5c30ca55dc 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-25, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -61,27 +61,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:1) /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191` - // Minimum execution time: 10_660_000 picoseconds. - Weight::from_parts(10_960_000, 20191) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `142` + // Estimated: `3530` + // Minimum execution time: 6_680_000 picoseconds. 
+ Weight::from_parts(6_910_000, 3530) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:200) @@ -89,13 +85,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191` - // Minimum execution time: 2_200_000 picoseconds. - Weight::from_parts(612_529, 20191) - // Standard Error: 737 - .saturating_add(Weight::from_parts(3_807_625, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `142` + // Estimated: `3530` + // Minimum execution time: 2_120_000 picoseconds. + Weight::from_parts(2_190_000, 3530) + // Standard Error: 1_175 + .saturating_add(Weight::from_parts(3_053_846, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) } @@ -103,8 +99,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:200 w:200) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:200) @@ -112,13 +106,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 2_190_000 picoseconds. - Weight::from_parts(2_280_000, 20191) - // Standard Error: 1_011 - .saturating_add(Weight::from_parts(4_931_681, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `142` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 2_210_000 picoseconds. 
+ Weight::from_parts(2_280_000, 3481) + // Standard Error: 1_618 + .saturating_add(Weight::from_parts(4_308_375, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -142,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 13_470_000 picoseconds. - Weight::from_parts(13_840_000, 3530) + // Minimum execution time: 13_270_000 picoseconds. + Weight::from_parts(13_559_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -165,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 16_940_000 picoseconds. - Weight::from_parts(17_340_000, 3530) + // Minimum execution time: 16_680_000 picoseconds. + Weight::from_parts(17_260_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -191,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 16_910_000 picoseconds. - Weight::from_parts(17_170_000, 5874) - // Standard Error: 102_760 - .saturating_add(Weight::from_parts(45_644_966, 0).saturating_mul(b.into())) + // Minimum execution time: 16_630_000 picoseconds. + Weight::from_parts(17_030_000, 5874) + // Standard Error: 137_617 + .saturating_add(Weight::from_parts(47_437_920, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -213,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 9_420_000 picoseconds. - Weight::from_parts(9_710_000, 6070) + // Minimum execution time: 10_200_000 picoseconds. + Weight::from_parts(10_490_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -226,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 5_400_000 picoseconds. - Weight::from_parts(5_640_000, 3522) + // Minimum execution time: 5_730_000 picoseconds. + Weight::from_parts(5_980_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -239,8 +233,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 5_390_000 picoseconds. - Weight::from_parts(5_610_000, 3522) + // Minimum execution time: 5_760_000 picoseconds. + Weight::from_parts(5_990_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -250,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_170_000, 3522) + // Minimum execution time: 3_270_000 picoseconds. 
+ Weight::from_parts(3_440_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -272,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 15_400_000 picoseconds. - Weight::from_parts(15_850_000, 3530) + // Minimum execution time: 16_770_000 picoseconds. + Weight::from_parts(17_150_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -284,10 +278,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 780_000 picoseconds. - Weight::from_parts(830_000, 20191) - // Standard Error: 35_354 - .saturating_add(Weight::from_parts(8_422_695, 0).saturating_mul(b.into())) + // Minimum execution time: 870_000 picoseconds. + Weight::from_parts(910_000, 20191) + // Standard Error: 37_993 + .saturating_add(Weight::from_parts(8_947_342, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_530_000 picoseconds. - Weight::from_parts(10_829_898, 36269) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(2_601_900, 0).saturating_mul(b.into())) + // Minimum execution time: 2_570_000 picoseconds. + Weight::from_parts(10_787_333, 36269) + // Standard Error: 7_619 + .saturating_add(Weight::from_parts(2_939_955, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -318,10 +312,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 3_690_000 picoseconds. - Weight::from_parts(9_633_879, 20191) - // Standard Error: 6_573 - .saturating_add(Weight::from_parts(2_694_801, 0).saturating_mul(b.into())) + // Minimum execution time: 530_000 picoseconds. + Weight::from_parts(5_572_362, 20191) + // Standard Error: 6_997 + .saturating_add(Weight::from_parts(2_986_451, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -336,10 +330,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 5_860_000 picoseconds. - Weight::from_parts(6_000_000, 36269) - // Standard Error: 26_204 - .saturating_add(Weight::from_parts(9_601_645, 0).saturating_mul(b.into())) + // Minimum execution time: 2_580_000 picoseconds. + Weight::from_parts(2_640_000, 36269) + // Standard Error: 25_713 + .saturating_add(Weight::from_parts(9_667_974, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -349,8 +343,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_930_000 picoseconds. - Weight::from_parts(3_060_000, 3522) + // Minimum execution time: 2_910_000 picoseconds. 
+ Weight::from_parts(3_020_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -359,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_780_000, 0) + // Minimum execution time: 2_510_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -369,8 +363,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_110_000, 3576) + // Minimum execution time: 1_860_000 picoseconds. + Weight::from_parts(2_010_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -379,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 2_200_000 picoseconds. - Weight::from_parts(2_340_000, 36269) + // Minimum execution time: 2_220_000 picoseconds. + Weight::from_parts(2_350_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -392,27 +386,23 @@ impl WeightInfo for () { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:1) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:1) /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191` - // Minimum execution time: 10_660_000 picoseconds. - Weight::from_parts(10_960_000, 20191) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `142` + // Estimated: `3530` + // Minimum execution time: 6_680_000 picoseconds. + Weight::from_parts(6_910_000, 3530) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: Nonfungible TokensMinted (r:1 w:1) /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:1 w:1) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:200) @@ -420,13 +410,13 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. 
fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191` - // Minimum execution time: 2_200_000 picoseconds. - Weight::from_parts(612_529, 20191) - // Standard Error: 737 - .saturating_add(Weight::from_parts(3_807_625, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `142` + // Estimated: `3530` + // Minimum execution time: 2_120_000 picoseconds. + Weight::from_parts(2_190_000, 3530) + // Standard Error: 1_175 + .saturating_add(Weight::from_parts(3_053_846, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) } @@ -434,8 +424,6 @@ impl WeightInfo for () { /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Nonfungible AccountBalance (r:200 w:200) /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:0 w:200) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// Storage: Nonfungible Owned (r:0 w:200) @@ -443,13 +431,13 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `456` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 2_190_000 picoseconds. - Weight::from_parts(2_280_000, 20191) - // Standard Error: 1_011 - .saturating_add(Weight::from_parts(4_931_681, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `142` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 2_210_000 picoseconds. + Weight::from_parts(2_280_000, 3481) + // Standard Error: 1_618 + .saturating_add(Weight::from_parts(4_308_375, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -473,8 +461,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 13_470_000 picoseconds. - Weight::from_parts(13_840_000, 3530) + // Minimum execution time: 13_270_000 picoseconds. + Weight::from_parts(13_559_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -496,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 16_940_000 picoseconds. - Weight::from_parts(17_340_000, 3530) + // Minimum execution time: 16_680_000 picoseconds. + Weight::from_parts(17_260_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -522,10 +510,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 16_910_000 picoseconds. 
- Weight::from_parts(17_170_000, 5874) - // Standard Error: 102_760 - .saturating_add(Weight::from_parts(45_644_966, 0).saturating_mul(b.into())) + // Minimum execution time: 16_630_000 picoseconds. + Weight::from_parts(17_030_000, 5874) + // Standard Error: 137_617 + .saturating_add(Weight::from_parts(47_437_920, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -544,8 +532,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 9_420_000 picoseconds. - Weight::from_parts(9_710_000, 6070) + // Minimum execution time: 10_200_000 picoseconds. + Weight::from_parts(10_490_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -557,8 +545,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 5_400_000 picoseconds. - Weight::from_parts(5_640_000, 3522) + // Minimum execution time: 5_730_000 picoseconds. + Weight::from_parts(5_980_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -570,8 +558,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 5_390_000 picoseconds. - Weight::from_parts(5_610_000, 3522) + // Minimum execution time: 5_760_000 picoseconds. + Weight::from_parts(5_990_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -581,8 +569,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_970_000 picoseconds. - Weight::from_parts(3_170_000, 3522) + // Minimum execution time: 3_270_000 picoseconds. + Weight::from_parts(3_440_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -603,8 +591,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 15_400_000 picoseconds. - Weight::from_parts(15_850_000, 3530) + // Minimum execution time: 16_770_000 picoseconds. + Weight::from_parts(17_150_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -615,10 +603,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 780_000 picoseconds. - Weight::from_parts(830_000, 20191) - // Standard Error: 35_354 - .saturating_add(Weight::from_parts(8_422_695, 0).saturating_mul(b.into())) + // Minimum execution time: 870_000 picoseconds. + Weight::from_parts(910_000, 20191) + // Standard Error: 37_993 + .saturating_add(Weight::from_parts(8_947_342, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -633,10 +621,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_530_000 picoseconds. - Weight::from_parts(10_829_898, 36269) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(2_601_900, 0).saturating_mul(b.into())) + // Minimum execution time: 2_570_000 picoseconds. 
+ Weight::from_parts(10_787_333, 36269) + // Standard Error: 7_619 + .saturating_add(Weight::from_parts(2_939_955, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -649,10 +637,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 3_690_000 picoseconds. - Weight::from_parts(9_633_879, 20191) - // Standard Error: 6_573 - .saturating_add(Weight::from_parts(2_694_801, 0).saturating_mul(b.into())) + // Minimum execution time: 530_000 picoseconds. + Weight::from_parts(5_572_362, 20191) + // Standard Error: 6_997 + .saturating_add(Weight::from_parts(2_986_451, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -667,10 +655,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 5_860_000 picoseconds. - Weight::from_parts(6_000_000, 36269) - // Standard Error: 26_204 - .saturating_add(Weight::from_parts(9_601_645, 0).saturating_mul(b.into())) + // Minimum execution time: 2_580_000 picoseconds. + Weight::from_parts(2_640_000, 36269) + // Standard Error: 25_713 + .saturating_add(Weight::from_parts(9_667_974, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -680,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_930_000 picoseconds. - Weight::from_parts(3_060_000, 3522) + // Minimum execution time: 2_910_000 picoseconds. + Weight::from_parts(3_020_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -690,8 +678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_780_000, 0) + // Minimum execution time: 2_510_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -700,8 +688,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_110_000, 3576) + // Minimum execution time: 1_860_000 picoseconds. + Weight::from_parts(2_010_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -710,8 +698,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 2_200_000 picoseconds. - Weight::from_parts(2_340_000, 36269) + // Minimum execution time: 2_220_000 picoseconds. + Weight::from_parts(2_350_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index c3f1621fd3..db0fb85d90 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-09-25, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -68,8 +68,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:1) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:1) @@ -78,19 +76,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191` - // Minimum execution time: 11_620_000 picoseconds. - Weight::from_parts(12_110_000, 20191) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `4` + // Estimated: `3530` + // Minimum execution time: 7_490_000 picoseconds. + Weight::from_parts(7_820_000, 3530) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:200) @@ -100,13 +96,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191` - // Minimum execution time: 1_690_000 picoseconds. - Weight::from_parts(1_750_000, 20191) - // Standard Error: 1_647 - .saturating_add(Weight::from_parts(4_876_194, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `4` + // Estimated: `3530` + // Minimum execution time: 1_570_000 picoseconds. 
+ Weight::from_parts(1_630_000, 3530) + // Standard Error: 1_544 + .saturating_add(Weight::from_parts(4_082_425, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) } @@ -114,8 +110,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:200) @@ -125,13 +119,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 1_730_000 picoseconds. - Weight::from_parts(1_790_000, 20191) - // Standard Error: 1_611 - .saturating_add(Weight::from_parts(6_002_948, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `4` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 1_620_000 picoseconds. + Weight::from_parts(1_690_000, 3481) + // Standard Error: 1_349 + .saturating_add(Weight::from_parts(5_124_357, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) @@ -141,8 +135,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:1) @@ -152,13 +144,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 6_260_000 picoseconds. - Weight::from_parts(2_297_892, 20191) - // Standard Error: 1_302 - .saturating_add(Weight::from_parts(4_093_180, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `4` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 2_570_000 picoseconds. 
+ Weight::from_parts(2_680_000, 3481) + // Standard Error: 637 + .saturating_add(Weight::from_parts(3_753_594, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -176,8 +168,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 17_330_000 picoseconds. - Weight::from_parts(17_910_000, 8682) + // Minimum execution time: 16_200_000 picoseconds. + Weight::from_parts(16_620_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -197,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 16_920_000 picoseconds. - Weight::from_parts(17_630_000, 3554) + // Minimum execution time: 15_820_000 picoseconds. + Weight::from_parts(16_410_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -210,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 10_600_000 picoseconds. - Weight::from_parts(10_950_000, 6118) + // Minimum execution time: 9_970_000 picoseconds. + Weight::from_parts(10_250_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -227,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 12_160_000 picoseconds. - Weight::from_parts(12_570_000, 6118) + // Minimum execution time: 11_530_000 picoseconds. + Weight::from_parts(11_800_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -244,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 14_300_000 picoseconds. - Weight::from_parts(14_760_000, 6118) + // Minimum execution time: 13_520_000 picoseconds. + Weight::from_parts(13_860_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -261,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 14_050_000 picoseconds. - Weight::from_parts(14_490_000, 6118) + // Minimum execution time: 13_290_000 picoseconds. + Weight::from_parts(13_590_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -274,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 6_210_000 picoseconds. - Weight::from_parts(6_500_000, 3554) + // Minimum execution time: 5_910_000 picoseconds. + Weight::from_parts(6_070_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +279,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 6_280_000 picoseconds. 
- Weight::from_parts(6_540_000, 3554) + // Minimum execution time: 5_920_000 picoseconds. + Weight::from_parts(6_100_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 15_570_000 picoseconds. - Weight::from_parts(15_990_000, 6118) + // Minimum execution time: 14_460_000 picoseconds. + Weight::from_parts(14_900_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -321,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 17_030_000 picoseconds. - Weight::from_parts(17_430_000, 6118) + // Minimum execution time: 15_920_000 picoseconds. + Weight::from_parts(16_340_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -340,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 19_300_000 picoseconds. - Weight::from_parts(19_760_000, 6118) + // Minimum execution time: 18_010_000 picoseconds. + Weight::from_parts(18_350_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -359,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 19_190_000 picoseconds. - Weight::from_parts(19_560_000, 6118) + // Minimum execution time: 17_800_000 picoseconds. + Weight::from_parts(18_160_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -382,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 22_590_000 picoseconds. - Weight::from_parts(23_290_000, 3570) + // Minimum execution time: 21_020_000 picoseconds. + Weight::from_parts(21_450_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -394,10 +386,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 780_000 picoseconds. - Weight::from_parts(850_000, 20191) - // Standard Error: 38_209 - .saturating_add(Weight::from_parts(9_042_866, 0).saturating_mul(b.into())) + // Minimum execution time: 790_000 picoseconds. + Weight::from_parts(840_000, 20191) + // Standard Error: 35_436 + .saturating_add(Weight::from_parts(8_505_258, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -412,10 +404,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_160_000 picoseconds. - Weight::from_parts(15_283_437, 36269) - // Standard Error: 12_988 - .saturating_add(Weight::from_parts(2_757_274, 0).saturating_mul(b.into())) + // Minimum execution time: 1_710_000 picoseconds. 
+ Weight::from_parts(5_892_635, 36269) + // Standard Error: 5_857 + .saturating_add(Weight::from_parts(2_956_238, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -428,10 +420,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 3_560_000 picoseconds. - Weight::from_parts(6_788_279, 20191) - // Standard Error: 5_443 - .saturating_add(Weight::from_parts(2_641_825, 0).saturating_mul(b.into())) + // Minimum execution time: 500_000 picoseconds. + Weight::from_parts(7_856_522, 20191) + // Standard Error: 7_896 + .saturating_add(Weight::from_parts(2_770_333, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -446,10 +438,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 4_650_000 picoseconds. - Weight::from_parts(4_820_000, 36269) - // Standard Error: 24_035 - .saturating_add(Weight::from_parts(8_828_039, 0).saturating_mul(b.into())) + // Minimum execution time: 1_710_000 picoseconds. + Weight::from_parts(1_790_000, 36269) + // Standard Error: 23_937 + .saturating_add(Weight::from_parts(9_102_744, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -461,8 +453,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 7_260_000 picoseconds. - Weight::from_parts(7_520_000, 3554) + // Minimum execution time: 7_540_000 picoseconds. + Weight::from_parts(7_740_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -472,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 3_200_000 picoseconds. - Weight::from_parts(3_360_000, 6118) + // Minimum execution time: 3_050_000 picoseconds. + Weight::from_parts(3_230_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -482,8 +474,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_420_000 picoseconds. - Weight::from_parts(2_540_000, 0) + // Minimum execution time: 2_560_000 picoseconds. + Weight::from_parts(2_680_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -492,8 +484,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_370_000 picoseconds. - Weight::from_parts(1_470_000, 3576) + // Minimum execution time: 1_430_000 picoseconds. + Weight::from_parts(1_510_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -502,8 +494,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_350_000 picoseconds. - Weight::from_parts(1_420_000, 36269) + // Minimum execution time: 1_380_000 picoseconds. 
+ Weight::from_parts(1_470_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -515,8 +507,6 @@ impl WeightInfo for () { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:1) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:1) @@ -525,19 +515,17 @@ impl WeightInfo for () { /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) fn create_item() -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191` - // Minimum execution time: 11_620_000 picoseconds. - Weight::from_parts(12_110_000, 20191) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `4` + // Estimated: `3530` + // Minimum execution time: 7_490_000 picoseconds. + Weight::from_parts(7_820_000, 3530) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Refungible TokensMinted (r:1 w:1) /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:1 w:1) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:200) @@ -547,13 +535,13 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191` - // Minimum execution time: 1_690_000 picoseconds. - Weight::from_parts(1_750_000, 20191) - // Standard Error: 1_647 - .saturating_add(Weight::from_parts(4_876_194, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `4` + // Estimated: `3530` + // Minimum execution time: 1_570_000 picoseconds. 
+ Weight::from_parts(1_630_000, 3530) + // Standard Error: 1_544 + .saturating_add(Weight::from_parts(4_082_425, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) } @@ -561,8 +549,6 @@ impl WeightInfo for () { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:200) @@ -572,13 +558,13 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 1_730_000 picoseconds. - Weight::from_parts(1_790_000, 20191) - // Standard Error: 1_611 - .saturating_add(Weight::from_parts(6_002_948, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `4` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 1_620_000 picoseconds. + Weight::from_parts(1_690_000, 3481) + // Standard Error: 1_349 + .saturating_add(Weight::from_parts(5_124_357, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) @@ -588,8 +574,6 @@ impl WeightInfo for () { /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) /// Storage: Refungible AccountBalance (r:200 w:200) /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible Balance (r:0 w:200) /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:0 w:1) @@ -599,13 +583,13 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318` - // Estimated: `20191 + b * (2540 ±0)` - // Minimum execution time: 6_260_000 picoseconds. - Weight::from_parts(2_297_892, 20191) - // Standard Error: 1_302 - .saturating_add(Weight::from_parts(4_093_180, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `4` + // Estimated: `3481 + b * (2540 ±0)` + // Minimum execution time: 2_570_000 picoseconds. 
+ Weight::from_parts(2_680_000, 3481) + // Standard Error: 637 + .saturating_add(Weight::from_parts(3_753_594, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -623,8 +607,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 17_330_000 picoseconds. - Weight::from_parts(17_910_000, 8682) + // Minimum execution time: 16_200_000 picoseconds. + Weight::from_parts(16_620_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -644,8 +628,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 16_920_000 picoseconds. - Weight::from_parts(17_630_000, 3554) + // Minimum execution time: 15_820_000 picoseconds. + Weight::from_parts(16_410_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -657,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 10_600_000 picoseconds. - Weight::from_parts(10_950_000, 6118) + // Minimum execution time: 9_970_000 picoseconds. + Weight::from_parts(10_250_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -674,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 12_160_000 picoseconds. - Weight::from_parts(12_570_000, 6118) + // Minimum execution time: 11_530_000 picoseconds. + Weight::from_parts(11_800_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -691,8 +675,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 14_300_000 picoseconds. - Weight::from_parts(14_760_000, 6118) + // Minimum execution time: 13_520_000 picoseconds. + Weight::from_parts(13_860_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -708,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 14_050_000 picoseconds. - Weight::from_parts(14_490_000, 6118) + // Minimum execution time: 13_290_000 picoseconds. + Weight::from_parts(13_590_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -721,8 +705,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 6_210_000 picoseconds. - Weight::from_parts(6_500_000, 3554) + // Minimum execution time: 5_910_000 picoseconds. + Weight::from_parts(6_070_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,8 +718,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 6_280_000 picoseconds. - Weight::from_parts(6_540_000, 3554) + // Minimum execution time: 5_920_000 picoseconds. 
+ Weight::from_parts(6_100_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -749,8 +733,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 15_570_000 picoseconds. - Weight::from_parts(15_990_000, 6118) + // Minimum execution time: 14_460_000 picoseconds. + Weight::from_parts(14_900_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -768,8 +752,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 17_030_000 picoseconds. - Weight::from_parts(17_430_000, 6118) + // Minimum execution time: 15_920_000 picoseconds. + Weight::from_parts(16_340_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -787,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 19_300_000 picoseconds. - Weight::from_parts(19_760_000, 6118) + // Minimum execution time: 18_010_000 picoseconds. + Weight::from_parts(18_350_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -806,8 +790,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 19_190_000 picoseconds. - Weight::from_parts(19_560_000, 6118) + // Minimum execution time: 17_800_000 picoseconds. + Weight::from_parts(18_160_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -829,8 +813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 22_590_000 picoseconds. - Weight::from_parts(23_290_000, 3570) + // Minimum execution time: 21_020_000 picoseconds. + Weight::from_parts(21_450_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -841,10 +825,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 780_000 picoseconds. - Weight::from_parts(850_000, 20191) - // Standard Error: 38_209 - .saturating_add(Weight::from_parts(9_042_866, 0).saturating_mul(b.into())) + // Minimum execution time: 790_000 picoseconds. + Weight::from_parts(840_000, 20191) + // Standard Error: 35_436 + .saturating_add(Weight::from_parts(8_505_258, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,10 +843,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_160_000 picoseconds. - Weight::from_parts(15_283_437, 36269) - // Standard Error: 12_988 - .saturating_add(Weight::from_parts(2_757_274, 0).saturating_mul(b.into())) + // Minimum execution time: 1_710_000 picoseconds. 
+ Weight::from_parts(5_892_635, 36269) + // Standard Error: 5_857 + .saturating_add(Weight::from_parts(2_956_238, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -875,10 +859,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 3_560_000 picoseconds. - Weight::from_parts(6_788_279, 20191) - // Standard Error: 5_443 - .saturating_add(Weight::from_parts(2_641_825, 0).saturating_mul(b.into())) + // Minimum execution time: 500_000 picoseconds. + Weight::from_parts(7_856_522, 20191) + // Standard Error: 7_896 + .saturating_add(Weight::from_parts(2_770_333, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -893,10 +877,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 4_650_000 picoseconds. - Weight::from_parts(4_820_000, 36269) - // Standard Error: 24_035 - .saturating_add(Weight::from_parts(8_828_039, 0).saturating_mul(b.into())) + // Minimum execution time: 1_710_000 picoseconds. + Weight::from_parts(1_790_000, 36269) + // Standard Error: 23_937 + .saturating_add(Weight::from_parts(9_102_744, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -908,8 +892,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 7_260_000 picoseconds. - Weight::from_parts(7_520_000, 3554) + // Minimum execution time: 7_540_000 picoseconds. + Weight::from_parts(7_740_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -919,8 +903,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 3_200_000 picoseconds. - Weight::from_parts(3_360_000, 6118) + // Minimum execution time: 3_050_000 picoseconds. + Weight::from_parts(3_230_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -929,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_420_000 picoseconds. - Weight::from_parts(2_540_000, 0) + // Minimum execution time: 2_560_000 picoseconds. + Weight::from_parts(2_680_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -939,8 +923,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_370_000 picoseconds. - Weight::from_parts(1_470_000, 3576) + // Minimum execution time: 1_430_000 picoseconds. + Weight::from_parts(1_510_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -949,8 +933,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_350_000 picoseconds. - Weight::from_parts(1_420_000, 36269) + // Minimum execution time: 1_380_000 picoseconds. 
+ Weight::from_parts(1_470_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } From 12a441c421f3ff602bd9bca29b0f1de8b2356255 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 12:51:49 +0200 Subject: [PATCH 041/143] fix: debug check if new token doesnt have any properties --- pallets/nonfungible/src/lib.rs | 4 ++++ pallets/refungible/src/lib.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index c61d9bf837..13baa46470 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -632,6 +632,10 @@ impl Pallet { }); let stored_properties = if is_new_token { + debug_assert!(!>::contains_key(( + collection.id, + token_id + ))); TokenPropertiesT::new() } else { >::get((collection.id, token_id)) diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 24f9ee1d71..84a9c039ad 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -575,6 +575,10 @@ impl Pallet { }); let stored_properties = if is_new_token { + debug_assert!(!>::contains_key(( + collection.id, + token_id + ))); TokenPropertiesT::new() } else { >::get((collection.id, token_id)) From 915db5673542d052c861a0a63bee5af1d85d6e3e Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 15:05:31 +0200 Subject: [PATCH 042/143] chore: bench weights --- pallets/app-promotion/src/weights.rs | 218 ++++++----- pallets/collator-selection/src/weights.rs | 270 +++++++------ pallets/common/src/weights.rs | 54 +-- pallets/configuration/src/weights.rs | 62 +-- pallets/evm-migration/src/weights.rs | 86 +++-- pallets/foreign-assets/src/weights.rs | 34 +- pallets/fungible/src/weights.rs | 98 ++--- pallets/identity/src/weights.rs | 446 +++++++++++----------- pallets/maintenance/src/weights.rs | 30 +- pallets/nonfungible/src/weights.rs | 210 +++++----- pallets/refungible/src/weights.rs | 266 ++++++------- pallets/structure/src/weights.rs | 18 +- pallets/unique/src/weights.rs | 174 ++++----- runtime/common/weights/xcm.rs | 120 +++--- 14 files changed, 1074 insertions(+), 1012 deletions(-) diff --git a/pallets/app-promotion/src/weights.rs b/pallets/app-promotion/src/weights.rs index c6a1192ca5..ca71a10f3e 100644 --- a/pallets/app-promotion/src/weights.rs +++ b/pallets/app-promotion/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_app_promotion //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -48,25 +48,29 @@ pub trait WeightInfo { /// Weights for pallet_app_promotion using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: Maintenance Enabled (r:1 w:0) + /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) /// Storage: AppPromotion PendingUnstake (r:1 w:1) /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:3 w:3) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:3 w:3) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: System Account (r:3 w:3) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:3 w:0) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// The range of component `b` is `[0, 3]`. fn on_initialize(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `180 + b * (277 ±0)` - // Estimated: `5602 + b * (6377 ±0)` - // Minimum execution time: 3_724_000 picoseconds. - Weight::from_parts(4_538_653, 5602) - // Standard Error: 14_774 - .saturating_add(Weight::from_parts(10_368_686, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(b.into()))) + // Measured: `222 + b * (285 ±0)` + // Estimated: `3622 + b * (3774 ±0)` + // Minimum execution time: 5_034_000 picoseconds. + Weight::from_parts(5_845_442, 3622) + // Standard Error: 18_650 + .saturating_add(Weight::from_parts(13_172_650, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 6377).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: AppPromotion Admin (r:0 w:1) /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) @@ -74,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_426_000 picoseconds. - Weight::from_parts(6_149_000, 0) + // Minimum execution time: 4_838_000 picoseconds. + Weight::from_parts(5_022_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: AppPromotion Admin (r:1 w:0) @@ -90,24 +94,26 @@ impl WeightInfo for SubstrateWeight { /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) /// Storage: System Account (r:101 w:101) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:100 w:100) + /// Storage: Balances Freezes (r:100 w:100) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:100 w:0) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: AppPromotion TotalStaked (r:1 w:1) /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) /// The range of component `b` is `[1, 100]`. 
fn payout_stakers(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531 + b * (633 ±0)` - // Estimated: `16194 + b * (32560 ±0)` - // Minimum execution time: 84_632_000 picoseconds. - Weight::from_parts(800_384, 16194) - // Standard Error: 19_457 - .saturating_add(Weight::from_parts(49_393_958, 0).saturating_mul(b.into())) + // Measured: `564 + b * (641 ±0)` + // Estimated: `3593 + b * (25550 ±0)` + // Minimum execution time: 88_317_000 picoseconds. + Weight::from_parts(3_660_713, 3593) + // Standard Error: 17_984 + .saturating_add(Weight::from_parts(58_197_541, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().reads((12_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((12_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 32560).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 25550).saturating_mul(b.into())) } /// Storage: AppPromotion StakesPerAccount (r:1 w:1) /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) @@ -115,7 +121,9 @@ impl WeightInfo for SubstrateWeight { /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) + /// Storage: Balances Freezes (r:1 w:1) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:0) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: ParachainSystem ValidationData (r:1 w:0) /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) @@ -125,11 +133,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) fn stake() -> Weight { // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `20260` - // Minimum execution time: 24_750_000 picoseconds. - Weight::from_parts(25_157_000, 20260) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `389` + // Estimated: `4764` + // Minimum execution time: 27_232_000 picoseconds. + Weight::from_parts(27_657_000, 4764) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) @@ -144,10 +152,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unstake_all() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `35720` - // Minimum execution time: 53_670_000 picoseconds. - Weight::from_parts(54_376_000, 35720) + // Measured: `829` + // Estimated: `29095` + // Minimum execution time: 49_829_000 picoseconds. 
+ Weight::from_parts(50_668_000, 29095) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -163,10 +171,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unstake_partial() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `39234` - // Minimum execution time: 58_317_000 picoseconds. - Weight::from_parts(59_059_000, 39234) + // Measured: `829` + // Estimated: `29095` + // Minimum execution time: 55_678_000 picoseconds. + Weight::from_parts(56_709_000, 29095) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -176,10 +184,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn sponsor_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1027` - // Estimated: `5842` - // Minimum execution time: 18_117_000 picoseconds. - Weight::from_parts(18_634_000, 5842) + // Measured: `1060` + // Estimated: `4325` + // Minimum execution time: 16_628_000 picoseconds. + Weight::from_parts(16_968_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -189,10 +197,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn stop_sponsoring_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1059` - // Estimated: `5842` - // Minimum execution time: 16_999_000 picoseconds. - Weight::from_parts(17_417_000, 5842) + // Measured: `1092` + // Estimated: `4325` + // Minimum execution time: 15_970_000 picoseconds. + Weight::from_parts(16_316_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -204,8 +212,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 14_438_000 picoseconds. - Weight::from_parts(14_931_000, 1517) + // Minimum execution time: 13_052_000 picoseconds. + Weight::from_parts(13_555_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -215,10 +223,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) fn stop_sponsoring_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `5044` - // Minimum execution time: 14_786_000 picoseconds. - Weight::from_parts(15_105_000, 5044) + // Measured: `396` + // Estimated: `3527` + // Minimum execution time: 13_791_000 picoseconds. 
+ Weight::from_parts(14_125_000, 3527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -226,25 +234,29 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { + /// Storage: Maintenance Enabled (r:1 w:0) + /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) /// Storage: AppPromotion PendingUnstake (r:1 w:1) /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:3 w:3) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:3 w:3) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: System Account (r:3 w:3) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:3 w:0) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// The range of component `b` is `[0, 3]`. fn on_initialize(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `180 + b * (277 ±0)` - // Estimated: `5602 + b * (6377 ±0)` - // Minimum execution time: 3_724_000 picoseconds. - Weight::from_parts(4_538_653, 5602) - // Standard Error: 14_774 - .saturating_add(Weight::from_parts(10_368_686, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(b.into()))) + // Measured: `222 + b * (285 ±0)` + // Estimated: `3622 + b * (3774 ±0)` + // Minimum execution time: 5_034_000 picoseconds. + Weight::from_parts(5_845_442, 3622) + // Standard Error: 18_650 + .saturating_add(Weight::from_parts(13_172_650, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 6377).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: AppPromotion Admin (r:0 w:1) /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) @@ -252,8 +264,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_426_000 picoseconds. - Weight::from_parts(6_149_000, 0) + // Minimum execution time: 4_838_000 picoseconds. 
+ Weight::from_parts(5_022_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: AppPromotion Admin (r:1 w:0) @@ -268,24 +280,26 @@ impl WeightInfo for () { /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) /// Storage: System Account (r:101 w:101) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:100 w:100) + /// Storage: Balances Freezes (r:100 w:100) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:100 w:0) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: AppPromotion TotalStaked (r:1 w:1) /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) /// The range of component `b` is `[1, 100]`. fn payout_stakers(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531 + b * (633 ±0)` - // Estimated: `16194 + b * (32560 ±0)` - // Minimum execution time: 84_632_000 picoseconds. - Weight::from_parts(800_384, 16194) - // Standard Error: 19_457 - .saturating_add(Weight::from_parts(49_393_958, 0).saturating_mul(b.into())) + // Measured: `564 + b * (641 ±0)` + // Estimated: `3593 + b * (25550 ±0)` + // Minimum execution time: 88_317_000 picoseconds. + Weight::from_parts(3_660_713, 3593) + // Standard Error: 17_984 + .saturating_add(Weight::from_parts(58_197_541, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().reads((12_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((12_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 32560).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 25550).saturating_mul(b.into())) } /// Storage: AppPromotion StakesPerAccount (r:1 w:1) /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) @@ -293,7 +307,9 @@ impl WeightInfo for () { /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) /// Storage: System Account (r:1 w:1) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) + /// Storage: Balances Freezes (r:1 w:1) + /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:0) /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) /// Storage: ParachainSystem ValidationData (r:1 w:0) /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) @@ -303,11 +319,11 @@ impl WeightInfo for () { /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) fn stake() -> Weight { // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `20260` - // Minimum execution time: 24_750_000 picoseconds. - Weight::from_parts(25_157_000, 20260) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `389` + // Estimated: `4764` + // Minimum execution time: 27_232_000 picoseconds. 
+ Weight::from_parts(27_657_000, 4764) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) @@ -322,10 +338,10 @@ impl WeightInfo for () { /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unstake_all() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `35720` - // Minimum execution time: 53_670_000 picoseconds. - Weight::from_parts(54_376_000, 35720) + // Measured: `829` + // Estimated: `29095` + // Minimum execution time: 49_829_000 picoseconds. + Weight::from_parts(50_668_000, 29095) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -341,10 +357,10 @@ impl WeightInfo for () { /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) fn unstake_partial() -> Weight { // Proof Size summary in bytes: - // Measured: `796` - // Estimated: `39234` - // Minimum execution time: 58_317_000 picoseconds. - Weight::from_parts(59_059_000, 39234) + // Measured: `829` + // Estimated: `29095` + // Minimum execution time: 55_678_000 picoseconds. + Weight::from_parts(56_709_000, 29095) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -354,10 +370,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn sponsor_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1027` - // Estimated: `5842` - // Minimum execution time: 18_117_000 picoseconds. - Weight::from_parts(18_634_000, 5842) + // Measured: `1060` + // Estimated: `4325` + // Minimum execution time: 16_628_000 picoseconds. + Weight::from_parts(16_968_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -367,10 +383,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn stop_sponsoring_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1059` - // Estimated: `5842` - // Minimum execution time: 16_999_000 picoseconds. - Weight::from_parts(17_417_000, 5842) + // Measured: `1092` + // Estimated: `4325` + // Minimum execution time: 15_970_000 picoseconds. + Weight::from_parts(16_316_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -382,8 +398,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 14_438_000 picoseconds. - Weight::from_parts(14_931_000, 1517) + // Minimum execution time: 13_052_000 picoseconds. + Weight::from_parts(13_555_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -393,10 +409,10 @@ impl WeightInfo for () { /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) fn stop_sponsoring_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `5044` - // Minimum execution time: 14_786_000 picoseconds. - Weight::from_parts(15_105_000, 5044) + // Measured: `396` + // Estimated: `3527` + // Minimum execution time: 13_791_000 picoseconds. 
+ Weight::from_parts(14_125_000, 3527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/collator-selection/src/weights.rs b/pallets/collator-selection/src/weights.rs index cdaf984c00..8080bbed0a 100644 --- a/pallets/collator-selection/src/weights.rs +++ b/pallets/collator-selection/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_collator_selection //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -57,11 +57,11 @@ impl WeightInfo for SubstrateWeight { fn add_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + b * (45 ±0)` - // Estimated: `7485 + b * (45 ±0)` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(15_313_627, 7485) - // Standard Error: 1_744 - .saturating_add(Weight::from_parts(178_890, 0).saturating_mul(b.into())) + // Estimated: `3873 + b * (45 ±0)` + // Minimum execution time: 13_780_000 picoseconds. + Weight::from_parts(14_067_943, 3873) + // Standard Error: 1_187 + .saturating_add(Weight::from_parts(168_052, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) @@ -73,10 +73,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 9_426_000 picoseconds. - Weight::from_parts(9_693_408, 1806) - // Standard Error: 1_638 - .saturating_add(Weight::from_parts(227_917, 0).saturating_mul(b.into())) + // Minimum execution time: 8_583_000 picoseconds. + Weight::from_parts(8_833_981, 1806) + // Standard Error: 1_399 + .saturating_add(Weight::from_parts(140_293, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -86,18 +86,20 @@ impl WeightInfo for SubstrateWeight { /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:1 w:0) /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// The range of component `c` is `[1, 9]`. fn get_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `610 + c * (26 ±0)` - // Estimated: `9099 + c * (28 ±0)` - // Minimum execution time: 22_741_000 picoseconds. - Weight::from_parts(24_210_604, 9099) - // Standard Error: 2_703 - .saturating_add(Weight::from_parts(255_686, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 28).saturating_mul(c.into())) + // Measured: `668 + c * (46 ±0)` + // Estimated: `4131 + c * (47 ±0)` + // Minimum execution time: 29_155_000 picoseconds. 
+ Weight::from_parts(31_569_846, 4131) + // Standard Error: 10_912 + .saturating_add(Weight::from_parts(547_194, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) } /// Storage: CollatorSelection LicenseDepositOf (r:1 w:0) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) @@ -114,12 +116,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 7]`. fn onboard(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + c * (54 ±0)` - // Estimated: `10119` - // Minimum execution time: 20_397_000 picoseconds. - Weight::from_parts(21_415_013, 10119) - // Standard Error: 4_086 - .saturating_add(Weight::from_parts(252_810, 0).saturating_mul(c.into())) + // Measured: `414 + c * (54 ±0)` + // Estimated: `3529` + // Minimum execution time: 17_999_000 picoseconds. + Weight::from_parts(18_533_629, 3529) + // Standard Error: 3_238 + .saturating_add(Weight::from_parts(299_090, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -127,15 +129,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. + /// The range of component `c` is `[1, 8]`. fn offboard(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 10_543_000 picoseconds. - Weight::from_parts(11_227_541, 1806) - // Standard Error: 1_699 - .saturating_add(Weight::from_parts(181_030, 0).saturating_mul(c.into())) + // Minimum execution time: 9_845_000 picoseconds. + Weight::from_parts(10_209_005, 1806) + // Standard Error: 1_137 + .saturating_add(Weight::from_parts(156_275, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -143,37 +145,41 @@ impl WeightInfo for SubstrateWeight { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. + /// The range of component `c` is `[1, 8]`. fn release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + c * (61 ±0)` - // Estimated: `5335` - // Minimum execution time: 22_214_000 picoseconds. 
- Weight::from_parts(24_373_981, 5335) - // Standard Error: 8_018 - .saturating_add(Weight::from_parts(405_404, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `328 + c * (103 ±0)` + // Estimated: `3834` + // Minimum execution time: 28_700_000 picoseconds. + Weight::from_parts(29_499_805, 3834) + // Standard Error: 16_180 + .saturating_add(Weight::from_parts(880_131, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: CollatorSelection Candidates (r:1 w:1) /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. + /// The range of component `c` is `[1, 8]`. fn force_release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + c * (61 ±0)` - // Estimated: `5335` - // Minimum execution time: 22_159_000 picoseconds. - Weight::from_parts(24_200_796, 5335) - // Standard Error: 8_328 - .saturating_add(Weight::from_parts(312_138, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `328 + c * (103 ±0)` + // Estimated: `3834` + // Minimum execution time: 27_941_000 picoseconds. + Weight::from_parts(28_960_442, 3834) + // Standard Error: 17_391 + .saturating_add(Weight::from_parts(885_880, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: System Account (r:2 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) @@ -183,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn note_author() -> Weight { // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `7729` - // Minimum execution time: 16_520_000 picoseconds. - Weight::from_parts(16_933_000, 7729) + // Measured: `157` + // Estimated: `6196` + // Minimum execution time: 22_833_000 picoseconds. 
+ Weight::from_parts(23_223_000, 6196) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -194,32 +200,34 @@ impl WeightInfo for SubstrateWeight { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:10 w:0) + /// Storage: CollatorSelection LastAuthoredBlock (r:8 w:0) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) /// Storage: CollatorSelection Invulnerables (r:1 w:0) /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: System BlockWeight (r:1 w:1) /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:9 w:9) + /// Storage: CollatorSelection LicenseDepositOf (r:7 w:7) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:10 w:10) + /// Storage: Balances Holds (r:7 w:7) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: System Account (r:8 w:8) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 10]`. - /// The range of component `c` is `[1, 10]`. + /// The range of component `r` is `[1, 8]`. + /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `562 + r * (190 ±0) + c * (83 ±0)` - // Estimated: `91818518943723 + c * (2519 ±0) + r * (5142 ±1)` - // Minimum execution time: 16_153_000 picoseconds. - Weight::from_parts(16_601_000, 91818518943723) - // Standard Error: 119_095 - .saturating_add(Weight::from_parts(10_660_813, 0).saturating_mul(c.into())) + // Measured: `727 + c * (84 ±0) + r * (254 ±0)` + // Estimated: `26857 + c * (2519 ±0) + r * (2844 ±4)` + // Minimum execution time: 15_283_000 picoseconds. + Weight::from_parts(15_615_000, 26857) + // Standard Error: 188_448 + .saturating_add(Weight::from_parts(15_548_718, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 5142).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2844).saturating_mul(r.into())) } } @@ -235,11 +243,11 @@ impl WeightInfo for () { fn add_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + b * (45 ±0)` - // Estimated: `7485 + b * (45 ±0)` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(15_313_627, 7485) - // Standard Error: 1_744 - .saturating_add(Weight::from_parts(178_890, 0).saturating_mul(b.into())) + // Estimated: `3873 + b * (45 ±0)` + // Minimum execution time: 13_780_000 picoseconds. 
+ Weight::from_parts(14_067_943, 3873) + // Standard Error: 1_187 + .saturating_add(Weight::from_parts(168_052, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) @@ -251,10 +259,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 9_426_000 picoseconds. - Weight::from_parts(9_693_408, 1806) - // Standard Error: 1_638 - .saturating_add(Weight::from_parts(227_917, 0).saturating_mul(b.into())) + // Minimum execution time: 8_583_000 picoseconds. + Weight::from_parts(8_833_981, 1806) + // Standard Error: 1_399 + .saturating_add(Weight::from_parts(140_293, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -264,18 +272,20 @@ impl WeightInfo for () { /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:1 w:0) /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// The range of component `c` is `[1, 9]`. fn get_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `610 + c * (26 ±0)` - // Estimated: `9099 + c * (28 ±0)` - // Minimum execution time: 22_741_000 picoseconds. - Weight::from_parts(24_210_604, 9099) - // Standard Error: 2_703 - .saturating_add(Weight::from_parts(255_686, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 28).saturating_mul(c.into())) + // Measured: `668 + c * (46 ±0)` + // Estimated: `4131 + c * (47 ±0)` + // Minimum execution time: 29_155_000 picoseconds. + Weight::from_parts(31_569_846, 4131) + // Standard Error: 10_912 + .saturating_add(Weight::from_parts(547_194, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) } /// Storage: CollatorSelection LicenseDepositOf (r:1 w:0) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) @@ -292,12 +302,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 7]`. fn onboard(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + c * (54 ±0)` - // Estimated: `10119` - // Minimum execution time: 20_397_000 picoseconds. - Weight::from_parts(21_415_013, 10119) - // Standard Error: 4_086 - .saturating_add(Weight::from_parts(252_810, 0).saturating_mul(c.into())) + // Measured: `414 + c * (54 ±0)` + // Estimated: `3529` + // Minimum execution time: 17_999_000 picoseconds. 
+ Weight::from_parts(18_533_629, 3529) + // Standard Error: 3_238 + .saturating_add(Weight::from_parts(299_090, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -305,15 +315,15 @@ impl WeightInfo for () { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. + /// The range of component `c` is `[1, 8]`. fn offboard(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 10_543_000 picoseconds. - Weight::from_parts(11_227_541, 1806) - // Standard Error: 1_699 - .saturating_add(Weight::from_parts(181_030, 0).saturating_mul(c.into())) + // Minimum execution time: 9_845_000 picoseconds. + Weight::from_parts(10_209_005, 1806) + // Standard Error: 1_137 + .saturating_add(Weight::from_parts(156_275, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -321,37 +331,41 @@ impl WeightInfo for () { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. + /// The range of component `c` is `[1, 8]`. fn release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + c * (61 ±0)` - // Estimated: `5335` - // Minimum execution time: 22_214_000 picoseconds. - Weight::from_parts(24_373_981, 5335) - // Standard Error: 8_018 - .saturating_add(Weight::from_parts(405_404, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `328 + c * (103 ±0)` + // Estimated: `3834` + // Minimum execution time: 28_700_000 picoseconds. + Weight::from_parts(29_499_805, 3834) + // Standard Error: 16_180 + .saturating_add(Weight::from_parts(880_131, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: CollatorSelection Candidates (r:1 w:1) /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: Balances Holds (r:1 w:1) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 10]`. 
+ /// The range of component `c` is `[1, 8]`. fn force_release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + c * (61 ±0)` - // Estimated: `5335` - // Minimum execution time: 22_159_000 picoseconds. - Weight::from_parts(24_200_796, 5335) - // Standard Error: 8_328 - .saturating_add(Weight::from_parts(312_138, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `328 + c * (103 ±0)` + // Estimated: `3834` + // Minimum execution time: 27_941_000 picoseconds. + Weight::from_parts(28_960_442, 3834) + // Standard Error: 17_391 + .saturating_add(Weight::from_parts(885_880, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: System Account (r:2 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) @@ -361,10 +375,10 @@ impl WeightInfo for () { /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn note_author() -> Weight { // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `7729` - // Minimum execution time: 16_520_000 picoseconds. - Weight::from_parts(16_933_000, 7729) + // Measured: `157` + // Estimated: `6196` + // Minimum execution time: 22_833_000 picoseconds. + Weight::from_parts(23_223_000, 6196) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -372,32 +386,34 @@ impl WeightInfo for () { /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:10 w:0) + /// Storage: CollatorSelection LastAuthoredBlock (r:8 w:0) /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) /// Storage: CollatorSelection Invulnerables (r:1 w:0) /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) /// Storage: System BlockWeight (r:1 w:1) /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:9 w:9) + /// Storage: CollatorSelection LicenseDepositOf (r:7 w:7) /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:10 w:10) + /// Storage: Balances Holds (r:7 w:7) + /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) + /// Storage: System Account (r:8 w:8) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 10]`. - /// The range of component `c` is `[1, 10]`. + /// The range of component `r` is `[1, 8]`. + /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `562 + r * (190 ±0) + c * (83 ±0)` - // Estimated: `91818518943723 + c * (2519 ±0) + r * (5142 ±1)` - // Minimum execution time: 16_153_000 picoseconds. 
- Weight::from_parts(16_601_000, 91818518943723) - // Standard Error: 119_095 - .saturating_add(Weight::from_parts(10_660_813, 0).saturating_mul(c.into())) + // Measured: `727 + c * (84 ±0) + r * (254 ±0)` + // Estimated: `26857 + c * (2519 ±0) + r * (2844 ±4)` + // Minimum execution time: 15_283_000 picoseconds. + Weight::from_parts(15_615_000, 26857) + // Standard Error: 188_448 + .saturating_add(Weight::from_parts(15_548_718, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 5142).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2844).saturating_mul(r.into())) } } diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 125fd76b1d..6b8edb32f2 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -46,12 +46,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 64]`. fn set_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `265` + // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_805_000 picoseconds. - Weight::from_parts(6_965_000, 44457) - // Standard Error: 20_175 - .saturating_add(Weight::from_parts(6_191_369, 0).saturating_mul(b.into())) + // Minimum execution time: 6_140_000 picoseconds. + Weight::from_parts(2_807_337, 44457) + // Standard Error: 15_773 + .saturating_add(Weight::from_parts(5_454_582, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -60,12 +60,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 64]`. fn delete_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + b * (33030 ±0)` + // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 6_284_000 picoseconds. - Weight::from_parts(6_416_000, 44457) - // Standard Error: 81_929 - .saturating_add(Weight::from_parts(23_972_425, 0).saturating_mul(b.into())) + // Minimum execution time: 5_969_000 picoseconds. + Weight::from_parts(6_153_000, 44457) + // Standard Error: 82_905 + .saturating_add(Weight::from_parts(23_575_983, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -73,10 +73,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn check_accesslist() -> Weight { // Proof Size summary in bytes: - // Measured: `340` + // Measured: `373` // Estimated: `3535` - // Minimum execution time: 5_205_000 picoseconds. 
- Weight::from_parts(5_438_000, 3535) + // Minimum execution time: 4_999_000 picoseconds. + Weight::from_parts(5_178_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } } @@ -88,12 +88,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 64]`. fn set_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `265` + // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_805_000 picoseconds. - Weight::from_parts(6_965_000, 44457) - // Standard Error: 20_175 - .saturating_add(Weight::from_parts(6_191_369, 0).saturating_mul(b.into())) + // Minimum execution time: 6_140_000 picoseconds. + Weight::from_parts(2_807_337, 44457) + // Standard Error: 15_773 + .saturating_add(Weight::from_parts(5_454_582, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -102,12 +102,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 64]`. fn delete_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + b * (33030 ±0)` + // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 6_284_000 picoseconds. - Weight::from_parts(6_416_000, 44457) - // Standard Error: 81_929 - .saturating_add(Weight::from_parts(23_972_425, 0).saturating_mul(b.into())) + // Minimum execution time: 5_969_000 picoseconds. + Weight::from_parts(6_153_000, 44457) + // Standard Error: 82_905 + .saturating_add(Weight::from_parts(23_575_983, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -115,10 +115,10 @@ impl WeightInfo for () { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn check_accesslist() -> Weight { // Proof Size summary in bytes: - // Measured: `340` + // Measured: `373` // Estimated: `3535` - // Minimum execution time: 5_205_000 picoseconds. - Weight::from_parts(5_438_000, 3535) + // Minimum execution time: 4_999_000 picoseconds. + Weight::from_parts(5_178_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } } diff --git a/pallets/configuration/src/weights.rs b/pallets/configuration/src/weights.rs index 73259db617..dba588351a 100644 --- a/pallets/configuration/src/weights.rs +++ b/pallets/configuration/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_configuration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -50,19 +50,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_725_000 picoseconds. - Weight::from_parts(1_853_000, 0) + // Minimum execution time: 1_496_000 picoseconds. 
+ Weight::from_parts(1_575_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration MinGasPriceOverride (r:0 w:1) /// Proof: Configuration MinGasPriceOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) fn set_min_gas_price_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_802_000 picoseconds. - Weight::from_parts(1_903_000, 0) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 2_293_000 picoseconds. + Weight::from_parts(2_425_000, 0) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) @@ -70,8 +74,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_048_000 picoseconds. - Weight::from_parts(2_157_000, 0) + // Minimum execution time: 1_542_000 picoseconds. + Weight::from_parts(1_633_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) @@ -80,8 +84,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_622_000 picoseconds. - Weight::from_parts(8_014_000, 0) + // Minimum execution time: 6_388_000 picoseconds. + Weight::from_parts(6_639_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) @@ -90,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_981_000 picoseconds. - Weight::from_parts(5_811_000, 0) + // Minimum execution time: 3_781_000 picoseconds. + Weight::from_parts(3_947_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) @@ -100,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_664_000 picoseconds. - Weight::from_parts(4_816_000, 0) + // Minimum execution time: 3_839_000 picoseconds. + Weight::from_parts(4_030_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -114,19 +118,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_725_000 picoseconds. - Weight::from_parts(1_853_000, 0) + // Minimum execution time: 1_496_000 picoseconds. 
+ Weight::from_parts(1_575_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration MinGasPriceOverride (r:0 w:1) /// Proof: Configuration MinGasPriceOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) fn set_min_gas_price_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_802_000 picoseconds. - Weight::from_parts(1_903_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 2_293_000 picoseconds. + Weight::from_parts(2_425_000, 0) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) @@ -134,8 +142,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_048_000 picoseconds. - Weight::from_parts(2_157_000, 0) + // Minimum execution time: 1_542_000 picoseconds. + Weight::from_parts(1_633_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) @@ -144,8 +152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_622_000 picoseconds. - Weight::from_parts(8_014_000, 0) + // Minimum execution time: 6_388_000 picoseconds. + Weight::from_parts(6_639_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) @@ -154,8 +162,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_981_000 picoseconds. - Weight::from_parts(5_811_000, 0) + // Minimum execution time: 3_781_000 picoseconds. + Weight::from_parts(3_947_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) @@ -164,8 +172,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_664_000 picoseconds. - Weight::from_parts(4_816_000, 0) + // Minimum execution time: 3_839_000 picoseconds. + Weight::from_parts(4_030_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/evm-migration/src/weights.rs b/pallets/evm-migration/src/weights.rs index 3d206b2114..487fbe51be 100644 --- a/pallets/evm-migration/src/weights.rs +++ b/pallets/evm-migration/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_evm_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -52,9 +52,9 @@ impl WeightInfo for SubstrateWeight { fn begin() -> Weight { // Proof Size summary in bytes: // Measured: `94` - // Estimated: `10646` - // Minimum execution time: 8_519_000 picoseconds. - Weight::from_parts(8_729_000, 10646) + // Estimated: `3593` + // Minimum execution time: 7_754_000 picoseconds. + Weight::from_parts(7_933_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -66,11 +66,11 @@ impl WeightInfo for SubstrateWeight { fn set_data(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` - // Estimated: `3590` - // Minimum execution time: 6_062_000 picoseconds. - Weight::from_parts(7_193_727, 3590) - // Standard Error: 1_844 - .saturating_add(Weight::from_parts(876_826, 0).saturating_mul(b.into())) + // Estimated: `3494` + // Minimum execution time: 5_694_000 picoseconds. + Weight::from_parts(5_971_220, 3494) + // Standard Error: 724 + .saturating_add(Weight::from_parts(802_677, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(b.into()))) } @@ -79,12 +79,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: EVM AccountCodes (r:0 w:1) /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) /// The range of component `b` is `[0, 80]`. - fn finish(_b: u32, ) -> Weight { + fn finish(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` - // Estimated: `3590` - // Minimum execution time: 7_452_000 picoseconds. - Weight::from_parts(8_531_888, 3590) + // Estimated: `3494` + // Minimum execution time: 6_960_000 picoseconds. + Weight::from_parts(7_397_101, 3494) + // Standard Error: 65 + .saturating_add(Weight::from_parts(1_120, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -93,20 +95,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_377_000 picoseconds. - Weight::from_parts(3_388_877, 0) - // Standard Error: 1_205 - .saturating_add(Weight::from_parts(696_701, 0).saturating_mul(b.into())) + // Minimum execution time: 1_188_000 picoseconds. + Weight::from_parts(2_079_143, 0) + // Standard Error: 634 + .saturating_add(Weight::from_parts(541_830, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_671_000 picoseconds. - Weight::from_parts(4_402_497, 0) - // Standard Error: 723 - .saturating_add(Weight::from_parts(1_338_678, 0).saturating_mul(b.into())) + // Minimum execution time: 1_223_000 picoseconds. + Weight::from_parts(2_614_838, 0) + // Standard Error: 878 + .saturating_add(Weight::from_parts(1_212_908, 0).saturating_mul(b.into())) } } @@ -121,9 +123,9 @@ impl WeightInfo for () { fn begin() -> Weight { // Proof Size summary in bytes: // Measured: `94` - // Estimated: `10646` - // Minimum execution time: 8_519_000 picoseconds. - Weight::from_parts(8_729_000, 10646) + // Estimated: `3593` + // Minimum execution time: 7_754_000 picoseconds. 
+ Weight::from_parts(7_933_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -135,11 +137,11 @@ impl WeightInfo for () { fn set_data(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` - // Estimated: `3590` - // Minimum execution time: 6_062_000 picoseconds. - Weight::from_parts(7_193_727, 3590) - // Standard Error: 1_844 - .saturating_add(Weight::from_parts(876_826, 0).saturating_mul(b.into())) + // Estimated: `3494` + // Minimum execution time: 5_694_000 picoseconds. + Weight::from_parts(5_971_220, 3494) + // Standard Error: 724 + .saturating_add(Weight::from_parts(802_677, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(b.into()))) } @@ -148,12 +150,14 @@ impl WeightInfo for () { /// Storage: EVM AccountCodes (r:0 w:1) /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) /// The range of component `b` is `[0, 80]`. - fn finish(_b: u32, ) -> Weight { + fn finish(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` - // Estimated: `3590` - // Minimum execution time: 7_452_000 picoseconds. - Weight::from_parts(8_531_888, 3590) + // Estimated: `3494` + // Minimum execution time: 6_960_000 picoseconds. + Weight::from_parts(7_397_101, 3494) + // Standard Error: 65 + .saturating_add(Weight::from_parts(1_120, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -162,20 +166,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_377_000 picoseconds. - Weight::from_parts(3_388_877, 0) - // Standard Error: 1_205 - .saturating_add(Weight::from_parts(696_701, 0).saturating_mul(b.into())) + // Minimum execution time: 1_188_000 picoseconds. + Weight::from_parts(2_079_143, 0) + // Standard Error: 634 + .saturating_add(Weight::from_parts(541_830, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_671_000 picoseconds. - Weight::from_parts(4_402_497, 0) - // Standard Error: 723 - .saturating_add(Weight::from_parts(1_338_678, 0).saturating_mul(b.into())) + // Minimum execution time: 1_223_000 picoseconds. + Weight::from_parts(2_614_838, 0) + // Standard Error: 878 + .saturating_add(Weight::from_parts(1_212_908, 0).saturating_mul(b.into())) } } diff --git a/pallets/foreign-assets/src/weights.rs b/pallets/foreign-assets/src/weights.rs index 96dc92cb2f..9d4c537618 100644 --- a/pallets/foreign-assets/src/weights.rs +++ b/pallets/foreign-assets/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_foreign_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -56,6 +56,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) /// Storage: ForeignAssets AssetBinding (r:1 w:1) /// Proof: ForeignAssets AssetBinding (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: Common AdminAmount (r:0 w:1) + /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:0 w:1) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Common CollectionProperties (r:0 w:1) @@ -65,11 +67,11 @@ impl WeightInfo for SubstrateWeight { fn register_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `25838` - // Minimum execution time: 37_778_000 picoseconds. - Weight::from_parts(38_334_000, 25838) + // Estimated: `6196` + // Minimum execution time: 44_390_000 picoseconds. + Weight::from_parts(45_078_000, 6196) .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().writes(11_u64)) + .saturating_add(T::DbWeight::get().writes(12_u64)) } /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) @@ -78,9 +80,9 @@ impl WeightInfo for SubstrateWeight { fn update_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `197` - // Estimated: `7615` - // Minimum execution time: 13_739_000 picoseconds. - Weight::from_parts(22_366_000, 7615) + // Estimated: `4079` + // Minimum execution time: 12_945_000 picoseconds. + Weight::from_parts(13_629_000, 4079) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -104,6 +106,8 @@ impl WeightInfo for () { /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) /// Storage: ForeignAssets AssetBinding (r:1 w:1) /// Proof: ForeignAssets AssetBinding (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: Common AdminAmount (r:0 w:1) + /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:0 w:1) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Common CollectionProperties (r:0 w:1) @@ -113,11 +117,11 @@ impl WeightInfo for () { fn register_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `286` - // Estimated: `25838` - // Minimum execution time: 37_778_000 picoseconds. - Weight::from_parts(38_334_000, 25838) + // Estimated: `6196` + // Minimum execution time: 44_390_000 picoseconds. 
+ Weight::from_parts(45_078_000, 6196) .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().writes(11_u64)) + .saturating_add(RocksDbWeight::get().writes(12_u64)) } /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) @@ -126,9 +130,9 @@ impl WeightInfo for () { fn update_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `197` - // Estimated: `7615` - // Minimum execution time: 13_739_000 picoseconds. - Weight::from_parts(22_366_000, 7615) + // Estimated: `4079` + // Minimum execution time: 12_945_000 picoseconds. + Weight::from_parts(13_629_000, 4079) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/fungible/src/weights.rs b/pallets/fungible/src/weights.rs index fab1f37f7c..b9e478558f 100644 --- a/pallets/fungible/src/weights.rs +++ b/pallets/fungible/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_fungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -54,9 +54,9 @@ impl WeightInfo for SubstrateWeight { fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `7035` - // Minimum execution time: 10_168_000 picoseconds. - Weight::from_parts(10_453_000, 7035) + // Estimated: `3542` + // Minimum execution time: 9_344_000 picoseconds. + Weight::from_parts(9_600_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -68,11 +68,11 @@ impl WeightInfo for SubstrateWeight { fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `4483 + b * (2552 ±0)` - // Minimum execution time: 3_248_000 picoseconds. - Weight::from_parts(12_455_981, 4483) - // Standard Error: 2_698 - .saturating_add(Weight::from_parts(3_426_148, 0).saturating_mul(b.into())) + // Estimated: `3493 + b * (2552 ±0)` + // Minimum execution time: 2_993_000 picoseconds. + Weight::from_parts(5_240_270, 3493) + // Standard Error: 1_763 + .saturating_add(Weight::from_parts(3_193_198, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -86,9 +86,9 @@ impl WeightInfo for SubstrateWeight { fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `197` - // Estimated: `7035` - // Minimum execution time: 12_717_000 picoseconds. - Weight::from_parts(13_031_000, 7035) + // Estimated: `3542` + // Minimum execution time: 11_725_000 picoseconds. + Weight::from_parts(12_140_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 13_640_000 picoseconds. - Weight::from_parts(13_935_000, 6094) + // Minimum execution time: 12_552_000 picoseconds. 
+ Weight::from_parts(12_894_000, 6094) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 11_769_000 picoseconds. - Weight::from_parts(12_072_000, 3542) + // Minimum execution time: 10_854_000 picoseconds. + Weight::from_parts(11_125_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 11_603_000 picoseconds. - Weight::from_parts(12_003_000, 3542) + // Minimum execution time: 10_912_000 picoseconds. + Weight::from_parts(11_163_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 5_682_000 picoseconds. - Weight::from_parts(5_892_000, 3558) + // Minimum execution time: 5_283_000 picoseconds. + Weight::from_parts(5_474_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Fungible Allowance (r:0 w:1) @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_415_000 picoseconds. - Weight::from_parts(6_599_000, 0) + // Minimum execution time: 5_774_000 picoseconds. + Weight::from_parts(5_909_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Fungible Allowance (r:1 w:1) @@ -158,9 +158,9 @@ impl WeightInfo for SubstrateWeight { fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `315` - // Estimated: `10593` - // Minimum execution time: 20_257_000 picoseconds. - Weight::from_parts(20_625_000, 10593) + // Estimated: `3558` + // Minimum execution time: 18_184_000 picoseconds. + Weight::from_parts(18_524_000, 3558) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -175,9 +175,9 @@ impl WeightInfo for () { fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `7035` - // Minimum execution time: 10_168_000 picoseconds. - Weight::from_parts(10_453_000, 7035) + // Estimated: `3542` + // Minimum execution time: 9_344_000 picoseconds. + Weight::from_parts(9_600_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -189,11 +189,11 @@ impl WeightInfo for () { fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `4483 + b * (2552 ±0)` - // Minimum execution time: 3_248_000 picoseconds. - Weight::from_parts(12_455_981, 4483) - // Standard Error: 2_698 - .saturating_add(Weight::from_parts(3_426_148, 0).saturating_mul(b.into())) + // Estimated: `3493 + b * (2552 ±0)` + // Minimum execution time: 2_993_000 picoseconds. 
+ Weight::from_parts(5_240_270, 3493) + // Standard Error: 1_763 + .saturating_add(Weight::from_parts(3_193_198, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -207,9 +207,9 @@ impl WeightInfo for () { fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `197` - // Estimated: `7035` - // Minimum execution time: 12_717_000 picoseconds. - Weight::from_parts(13_031_000, 7035) + // Estimated: `3542` + // Minimum execution time: 11_725_000 picoseconds. + Weight::from_parts(12_140_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 13_640_000 picoseconds. - Weight::from_parts(13_935_000, 6094) + // Minimum execution time: 12_552_000 picoseconds. + Weight::from_parts(12_894_000, 6094) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -232,8 +232,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 11_769_000 picoseconds. - Weight::from_parts(12_072_000, 3542) + // Minimum execution time: 10_854_000 picoseconds. + Weight::from_parts(11_125_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -245,8 +245,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 11_603_000 picoseconds. - Weight::from_parts(12_003_000, 3542) + // Minimum execution time: 10_912_000 picoseconds. + Weight::from_parts(11_163_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 5_682_000 picoseconds. - Weight::from_parts(5_892_000, 3558) + // Minimum execution time: 5_283_000 picoseconds. + Weight::from_parts(5_474_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Fungible Allowance (r:0 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_415_000 picoseconds. - Weight::from_parts(6_599_000, 0) + // Minimum execution time: 5_774_000 picoseconds. + Weight::from_parts(5_909_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Fungible Allowance (r:1 w:1) @@ -279,9 +279,9 @@ impl WeightInfo for () { fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `315` - // Estimated: `10593` - // Minimum execution time: 20_257_000 picoseconds. - Weight::from_parts(20_625_000, 10593) + // Estimated: `3558` + // Minimum execution time: 18_184_000 picoseconds. + Weight::from_parts(18_524_000, 3558) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/pallets/identity/src/weights.rs b/pallets/identity/src/weights.rs index 395b00144d..483bf012ee 100644 --- a/pallets/identity/src/weights.rs +++ b/pallets/identity/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_identity //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -64,10 +64,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 9_094_000 picoseconds. - Weight::from_parts(10_431_627, 2626) - // Standard Error: 1_046 - .saturating_add(Weight::from_parts(99_468, 0).saturating_mul(r.into())) + // Minimum execution time: 8_952_000 picoseconds. + Weight::from_parts(9_493_179, 2626) + // Standard Error: 776 + .saturating_add(Weight::from_parts(95_408, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -79,12 +79,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 18_662_000 picoseconds. - Weight::from_parts(17_939_760, 11003) - // Standard Error: 2_371 - .saturating_add(Weight::from_parts(22_184, 0).saturating_mul(r.into())) - // Standard Error: 462 - .saturating_add(Weight::from_parts(151_368, 0).saturating_mul(x.into())) + // Minimum execution time: 18_573_000 picoseconds. + Weight::from_parts(16_665_333, 11003) + // Standard Error: 1_927 + .saturating_add(Weight::from_parts(93_600, 0).saturating_mul(r.into())) + // Standard Error: 376 + .saturating_add(Weight::from_parts(134_895, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -98,11 +98,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `18716 + s * (2589 ±0)` - // Minimum execution time: 6_921_000 picoseconds. - Weight::from_parts(16_118_195, 18716) - // Standard Error: 1_786 - .saturating_add(Weight::from_parts(1_350_155, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 6_669_000 picoseconds. + Weight::from_parts(15_167_928, 11003) + // Standard Error: 1_551 + .saturating_add(Weight::from_parts(1_294_015, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -119,11 +119,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` - // Estimated: `17726` - // Minimum execution time: 6_858_000 picoseconds. - Weight::from_parts(16_222_054, 17726) - // Standard Error: 1_409 - .saturating_add(Weight::from_parts(593_588, 0).saturating_mul(p.into())) + // Estimated: `11003` + // Minimum execution time: 6_642_000 picoseconds. 
+ Weight::from_parts(15_473_100, 11003) + // Standard Error: 1_132 + .saturating_add(Weight::from_parts(592_570, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -140,15 +140,15 @@ impl WeightInfo for SubstrateWeight { fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `17726` - // Minimum execution time: 27_212_000 picoseconds. - Weight::from_parts(19_030_840, 17726) - // Standard Error: 3_118 - .saturating_add(Weight::from_parts(29_836, 0).saturating_mul(r.into())) - // Standard Error: 608 - .saturating_add(Weight::from_parts(590_661, 0).saturating_mul(s.into())) - // Standard Error: 608 - .saturating_add(Weight::from_parts(110_108, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 29_150_000 picoseconds. + Weight::from_parts(20_982_965, 11003) + // Standard Error: 2_448 + .saturating_add(Weight::from_parts(17_611, 0).saturating_mul(r.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(582_119, 0).saturating_mul(s.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(114_020, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -162,13 +162,13 @@ impl WeightInfo for SubstrateWeight { fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `13629` - // Minimum execution time: 19_771_000 picoseconds. - Weight::from_parts(18_917_892, 13629) - // Standard Error: 1_957 - .saturating_add(Weight::from_parts(57_465, 0).saturating_mul(r.into())) - // Standard Error: 381 - .saturating_add(Weight::from_parts(168_586, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 20_088_000 picoseconds. + Weight::from_parts(18_524_805, 11003) + // Standard Error: 1_965 + .saturating_add(Weight::from_parts(74_420, 0).saturating_mul(r.into())) + // Standard Error: 383 + .saturating_add(Weight::from_parts(152_570, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,12 +180,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 17_411_000 picoseconds. - Weight::from_parts(16_856_331, 11003) - // Standard Error: 7_002 - .saturating_add(Weight::from_parts(34_389, 0).saturating_mul(r.into())) - // Standard Error: 1_366 - .saturating_add(Weight::from_parts(165_686, 0).saturating_mul(x.into())) + // Minimum execution time: 17_277_000 picoseconds. + Weight::from_parts(16_760_327, 11003) + // Standard Error: 1_967 + .saturating_add(Weight::from_parts(33_499, 0).saturating_mul(r.into())) + // Standard Error: 383 + .saturating_add(Weight::from_parts(148_237, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -196,10 +196,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_089_000 picoseconds. 
- Weight::from_parts(7_750_487, 2626) - // Standard Error: 1_625 - .saturating_add(Weight::from_parts(101_135, 0).saturating_mul(r.into())) + // Minimum execution time: 6_566_000 picoseconds. + Weight::from_parts(6_982_669, 2626) + // Standard Error: 613 + .saturating_add(Weight::from_parts(77_476, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -210,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_300_000 picoseconds. - Weight::from_parts(6_836_140, 2626) - // Standard Error: 655 - .saturating_add(Weight::from_parts(102_284, 0).saturating_mul(r.into())) + // Minimum execution time: 5_856_000 picoseconds. + Weight::from_parts(6_185_551, 2626) + // Standard Error: 466 + .saturating_add(Weight::from_parts(76_432, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -224,10 +224,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_257_000 picoseconds. - Weight::from_parts(6_917_052, 2626) - // Standard Error: 2_628 - .saturating_add(Weight::from_parts(71_362, 0).saturating_mul(r.into())) + // Minimum execution time: 5_784_000 picoseconds. + Weight::from_parts(6_029_409, 2626) + // Standard Error: 389 + .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -240,13 +240,13 @@ impl WeightInfo for SubstrateWeight { fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `444 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `13629` - // Minimum execution time: 16_021_000 picoseconds. - Weight::from_parts(15_553_670, 13629) - // Standard Error: 5_797 - .saturating_add(Weight::from_parts(42_423, 0).saturating_mul(r.into())) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(252_721, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 15_250_000 picoseconds. + Weight::from_parts(14_264_575, 11003) + // Standard Error: 1_996 + .saturating_add(Weight::from_parts(45_342, 0).saturating_mul(r.into())) + // Standard Error: 369 + .saturating_add(Weight::from_parts(216_509, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -264,15 +264,15 @@ impl WeightInfo for SubstrateWeight { fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `23922` - // Minimum execution time: 40_801_000 picoseconds. - Weight::from_parts(34_079_397, 23922) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(31_496, 0).saturating_mul(r.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(599_691, 0).saturating_mul(s.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(101_683, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 43_178_000 picoseconds. 
+ Weight::from_parts(34_379_402, 11003) + // Standard Error: 2_843 + .saturating_add(Weight::from_parts(58_134, 0).saturating_mul(r.into())) + // Standard Error: 555 + .saturating_add(Weight::from_parts(596_395, 0).saturating_mul(s.into())) + // Standard Error: 555 + .saturating_add(Weight::from_parts(113_292, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -285,12 +285,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_412_000 picoseconds. - Weight::from_parts(4_592_000, 0) - // Standard Error: 703_509 - .saturating_add(Weight::from_parts(43_647_925, 0).saturating_mul(x.into())) - // Standard Error: 117_043 - .saturating_add(Weight::from_parts(9_312_431, 0).saturating_mul(n.into())) + // Minimum execution time: 4_093_000 picoseconds. + Weight::from_parts(4_184_000, 0) + // Standard Error: 688_514 + .saturating_add(Weight::from_parts(42_213_609, 0).saturating_mul(x.into())) + // Standard Error: 114_549 + .saturating_add(Weight::from_parts(8_812_982, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: Identity SubsOf (r:600 w:0) @@ -303,12 +303,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 3_824_000 picoseconds. - Weight::from_parts(3_950_000, 990) - // Standard Error: 2_864 - .saturating_add(Weight::from_parts(55_678, 0).saturating_mul(x.into())) - // Standard Error: 476 - .saturating_add(Weight::from_parts(1_138_349, 0).saturating_mul(n.into())) + // Minimum execution time: 3_998_000 picoseconds. + Weight::from_parts(4_142_000, 990) + // Standard Error: 2_992 + .saturating_add(Weight::from_parts(62_923, 0).saturating_mul(x.into())) + // Standard Error: 497 + .saturating_add(Weight::from_parts(1_122_767, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) @@ -323,12 +323,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 4_196_000 picoseconds. - Weight::from_parts(4_340_000, 990) - // Standard Error: 2_081_979 - .saturating_add(Weight::from_parts(130_653_903, 0).saturating_mul(s.into())) - // Standard Error: 346_381 - .saturating_add(Weight::from_parts(23_046_001, 0).saturating_mul(n.into())) + // Minimum execution time: 4_019_000 picoseconds. + Weight::from_parts(4_174_000, 990) + // Standard Error: 2_026_537 + .saturating_add(Weight::from_parts(127_217_493, 0).saturating_mul(s.into())) + // Standard Error: 337_157 + .saturating_add(Weight::from_parts(22_199_440, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -344,11 +344,11 @@ impl WeightInfo for SubstrateWeight { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `474 + s * (36 ±0)` - // Estimated: `21305` - // Minimum execution time: 15_289_000 picoseconds. 
- Weight::from_parts(21_319_844, 21305) - // Standard Error: 893 - .saturating_add(Weight::from_parts(53_159, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 16_361_000 picoseconds. + Weight::from_parts(20_622_408, 11003) + // Standard Error: 592 + .saturating_add(Weight::from_parts(48_502, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -360,11 +360,11 @@ impl WeightInfo for SubstrateWeight { fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` - // Estimated: `14582` - // Minimum execution time: 9_867_000 picoseconds. - Weight::from_parts(12_546_245, 14582) - // Standard Error: 509 - .saturating_add(Weight::from_parts(10_078, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 9_676_000 picoseconds. + Weight::from_parts(11_336_454, 11003) + // Standard Error: 240 + .saturating_add(Weight::from_parts(17_924, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -378,11 +378,11 @@ impl WeightInfo for SubstrateWeight { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` - // Estimated: `21305` - // Minimum execution time: 19_299_000 picoseconds. - Weight::from_parts(24_125_576, 21305) - // Standard Error: 1_479 - .saturating_add(Weight::from_parts(22_611, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 20_304_000 picoseconds. + Weight::from_parts(22_890_354, 11003) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(40_002, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -390,16 +390,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// Storage: Identity SubsOf (r:1 w:1) /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563 + s * (37 ±0)` - // Estimated: `10302` - // Minimum execution time: 14_183_000 picoseconds. - Weight::from_parts(17_343_547, 10302) - // Standard Error: 454 - .saturating_add(Weight::from_parts(45_925, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `703 + s * (37 ±0)` + // Estimated: `6723` + // Minimum execution time: 17_645_000 picoseconds. + Weight::from_parts(20_080_248, 6723) + // Standard Error: 1_331 + .saturating_add(Weight::from_parts(42_353, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -413,10 +415,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 9_094_000 picoseconds. - Weight::from_parts(10_431_627, 2626) - // Standard Error: 1_046 - .saturating_add(Weight::from_parts(99_468, 0).saturating_mul(r.into())) + // Minimum execution time: 8_952_000 picoseconds. 
+ Weight::from_parts(9_493_179, 2626) + // Standard Error: 776 + .saturating_add(Weight::from_parts(95_408, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -428,12 +430,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 18_662_000 picoseconds. - Weight::from_parts(17_939_760, 11003) - // Standard Error: 2_371 - .saturating_add(Weight::from_parts(22_184, 0).saturating_mul(r.into())) - // Standard Error: 462 - .saturating_add(Weight::from_parts(151_368, 0).saturating_mul(x.into())) + // Minimum execution time: 18_573_000 picoseconds. + Weight::from_parts(16_665_333, 11003) + // Standard Error: 1_927 + .saturating_add(Weight::from_parts(93_600, 0).saturating_mul(r.into())) + // Standard Error: 376 + .saturating_add(Weight::from_parts(134_895, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -447,11 +449,11 @@ impl WeightInfo for () { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `18716 + s * (2589 ±0)` - // Minimum execution time: 6_921_000 picoseconds. - Weight::from_parts(16_118_195, 18716) - // Standard Error: 1_786 - .saturating_add(Weight::from_parts(1_350_155, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 6_669_000 picoseconds. + Weight::from_parts(15_167_928, 11003) + // Standard Error: 1_551 + .saturating_add(Weight::from_parts(1_294_015, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -468,11 +470,11 @@ impl WeightInfo for () { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` - // Estimated: `17726` - // Minimum execution time: 6_858_000 picoseconds. - Weight::from_parts(16_222_054, 17726) - // Standard Error: 1_409 - .saturating_add(Weight::from_parts(593_588, 0).saturating_mul(p.into())) + // Estimated: `11003` + // Minimum execution time: 6_642_000 picoseconds. + Weight::from_parts(15_473_100, 11003) + // Standard Error: 1_132 + .saturating_add(Weight::from_parts(592_570, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -489,15 +491,15 @@ impl WeightInfo for () { fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `17726` - // Minimum execution time: 27_212_000 picoseconds. - Weight::from_parts(19_030_840, 17726) - // Standard Error: 3_118 - .saturating_add(Weight::from_parts(29_836, 0).saturating_mul(r.into())) - // Standard Error: 608 - .saturating_add(Weight::from_parts(590_661, 0).saturating_mul(s.into())) - // Standard Error: 608 - .saturating_add(Weight::from_parts(110_108, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 29_150_000 picoseconds. 
+ Weight::from_parts(20_982_965, 11003) + // Standard Error: 2_448 + .saturating_add(Weight::from_parts(17_611, 0).saturating_mul(r.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(582_119, 0).saturating_mul(s.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(114_020, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -511,13 +513,13 @@ impl WeightInfo for () { fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `13629` - // Minimum execution time: 19_771_000 picoseconds. - Weight::from_parts(18_917_892, 13629) - // Standard Error: 1_957 - .saturating_add(Weight::from_parts(57_465, 0).saturating_mul(r.into())) - // Standard Error: 381 - .saturating_add(Weight::from_parts(168_586, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 20_088_000 picoseconds. + Weight::from_parts(18_524_805, 11003) + // Standard Error: 1_965 + .saturating_add(Weight::from_parts(74_420, 0).saturating_mul(r.into())) + // Standard Error: 383 + .saturating_add(Weight::from_parts(152_570, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -529,12 +531,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 17_411_000 picoseconds. - Weight::from_parts(16_856_331, 11003) - // Standard Error: 7_002 - .saturating_add(Weight::from_parts(34_389, 0).saturating_mul(r.into())) - // Standard Error: 1_366 - .saturating_add(Weight::from_parts(165_686, 0).saturating_mul(x.into())) + // Minimum execution time: 17_277_000 picoseconds. + Weight::from_parts(16_760_327, 11003) + // Standard Error: 1_967 + .saturating_add(Weight::from_parts(33_499, 0).saturating_mul(r.into())) + // Standard Error: 383 + .saturating_add(Weight::from_parts(148_237, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -545,10 +547,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_089_000 picoseconds. - Weight::from_parts(7_750_487, 2626) - // Standard Error: 1_625 - .saturating_add(Weight::from_parts(101_135, 0).saturating_mul(r.into())) + // Minimum execution time: 6_566_000 picoseconds. + Weight::from_parts(6_982_669, 2626) + // Standard Error: 613 + .saturating_add(Weight::from_parts(77_476, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -559,10 +561,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_300_000 picoseconds. - Weight::from_parts(6_836_140, 2626) - // Standard Error: 655 - .saturating_add(Weight::from_parts(102_284, 0).saturating_mul(r.into())) + // Minimum execution time: 5_856_000 picoseconds. 
+ Weight::from_parts(6_185_551, 2626) + // Standard Error: 466 + .saturating_add(Weight::from_parts(76_432, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -573,10 +575,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_257_000 picoseconds. - Weight::from_parts(6_917_052, 2626) - // Standard Error: 2_628 - .saturating_add(Weight::from_parts(71_362, 0).saturating_mul(r.into())) + // Minimum execution time: 5_784_000 picoseconds. + Weight::from_parts(6_029_409, 2626) + // Standard Error: 389 + .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -589,13 +591,13 @@ impl WeightInfo for () { fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `444 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `13629` - // Minimum execution time: 16_021_000 picoseconds. - Weight::from_parts(15_553_670, 13629) - // Standard Error: 5_797 - .saturating_add(Weight::from_parts(42_423, 0).saturating_mul(r.into())) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(252_721, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 15_250_000 picoseconds. + Weight::from_parts(14_264_575, 11003) + // Standard Error: 1_996 + .saturating_add(Weight::from_parts(45_342, 0).saturating_mul(r.into())) + // Standard Error: 369 + .saturating_add(Weight::from_parts(216_509, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -613,15 +615,15 @@ impl WeightInfo for () { fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `23922` - // Minimum execution time: 40_801_000 picoseconds. - Weight::from_parts(34_079_397, 23922) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(31_496, 0).saturating_mul(r.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(599_691, 0).saturating_mul(s.into())) - // Standard Error: 732 - .saturating_add(Weight::from_parts(101_683, 0).saturating_mul(x.into())) + // Estimated: `11003` + // Minimum execution time: 43_178_000 picoseconds. + Weight::from_parts(34_379_402, 11003) + // Standard Error: 2_843 + .saturating_add(Weight::from_parts(58_134, 0).saturating_mul(r.into())) + // Standard Error: 555 + .saturating_add(Weight::from_parts(596_395, 0).saturating_mul(s.into())) + // Standard Error: 555 + .saturating_add(Weight::from_parts(113_292, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -634,12 +636,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_412_000 picoseconds. - Weight::from_parts(4_592_000, 0) - // Standard Error: 703_509 - .saturating_add(Weight::from_parts(43_647_925, 0).saturating_mul(x.into())) - // Standard Error: 117_043 - .saturating_add(Weight::from_parts(9_312_431, 0).saturating_mul(n.into())) + // Minimum execution time: 4_093_000 picoseconds. 
+ Weight::from_parts(4_184_000, 0) + // Standard Error: 688_514 + .saturating_add(Weight::from_parts(42_213_609, 0).saturating_mul(x.into())) + // Standard Error: 114_549 + .saturating_add(Weight::from_parts(8_812_982, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: Identity SubsOf (r:600 w:0) @@ -652,12 +654,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 3_824_000 picoseconds. - Weight::from_parts(3_950_000, 990) - // Standard Error: 2_864 - .saturating_add(Weight::from_parts(55_678, 0).saturating_mul(x.into())) - // Standard Error: 476 - .saturating_add(Weight::from_parts(1_138_349, 0).saturating_mul(n.into())) + // Minimum execution time: 3_998_000 picoseconds. + Weight::from_parts(4_142_000, 990) + // Standard Error: 2_992 + .saturating_add(Weight::from_parts(62_923, 0).saturating_mul(x.into())) + // Standard Error: 497 + .saturating_add(Weight::from_parts(1_122_767, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) @@ -672,12 +674,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 4_196_000 picoseconds. - Weight::from_parts(4_340_000, 990) - // Standard Error: 2_081_979 - .saturating_add(Weight::from_parts(130_653_903, 0).saturating_mul(s.into())) - // Standard Error: 346_381 - .saturating_add(Weight::from_parts(23_046_001, 0).saturating_mul(n.into())) + // Minimum execution time: 4_019_000 picoseconds. + Weight::from_parts(4_174_000, 990) + // Standard Error: 2_026_537 + .saturating_add(Weight::from_parts(127_217_493, 0).saturating_mul(s.into())) + // Standard Error: 337_157 + .saturating_add(Weight::from_parts(22_199_440, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -693,11 +695,11 @@ impl WeightInfo for () { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `474 + s * (36 ±0)` - // Estimated: `21305` - // Minimum execution time: 15_289_000 picoseconds. - Weight::from_parts(21_319_844, 21305) - // Standard Error: 893 - .saturating_add(Weight::from_parts(53_159, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 16_361_000 picoseconds. + Weight::from_parts(20_622_408, 11003) + // Standard Error: 592 + .saturating_add(Weight::from_parts(48_502, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -709,11 +711,11 @@ impl WeightInfo for () { fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` - // Estimated: `14582` - // Minimum execution time: 9_867_000 picoseconds. - Weight::from_parts(12_546_245, 14582) - // Standard Error: 509 - .saturating_add(Weight::from_parts(10_078, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 9_676_000 picoseconds. 
+ Weight::from_parts(11_336_454, 11003) + // Standard Error: 240 + .saturating_add(Weight::from_parts(17_924, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -727,11 +729,11 @@ impl WeightInfo for () { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` - // Estimated: `21305` - // Minimum execution time: 19_299_000 picoseconds. - Weight::from_parts(24_125_576, 21305) - // Standard Error: 1_479 - .saturating_add(Weight::from_parts(22_611, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 20_304_000 picoseconds. + Weight::from_parts(22_890_354, 11003) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(40_002, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -739,16 +741,18 @@ impl WeightInfo for () { /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) /// Storage: Identity SubsOf (r:1 w:1) /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:0) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563 + s * (37 ±0)` - // Estimated: `10302` - // Minimum execution time: 14_183_000 picoseconds. - Weight::from_parts(17_343_547, 10302) - // Standard Error: 454 - .saturating_add(Weight::from_parts(45_925, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `703 + s * (37 ±0)` + // Estimated: `6723` + // Minimum execution time: 17_645_000 picoseconds. + Weight::from_parts(20_080_248, 6723) + // Standard Error: 1_331 + .saturating_add(Weight::from_parts(42_353, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/pallets/maintenance/src/weights.rs b/pallets/maintenance/src/weights.rs index e76ba6f1bf..889fee5118 100644 --- a/pallets/maintenance/src/weights.rs +++ b/pallets/maintenance/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_maintenance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -47,8 +47,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_407_000 picoseconds. - Weight::from_parts(4_556_000, 0) + // Minimum execution time: 4_227_000 picoseconds. + Weight::from_parts(4_445_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Maintenance Enabled (r:0 w:1) @@ -57,8 +57,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_868_000 picoseconds. - Weight::from_parts(6_100_000, 0) + // Minimum execution time: 4_381_000 picoseconds. 
+ Weight::from_parts(4_576_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Preimage StatusFor (r:1 w:0) @@ -68,9 +68,9 @@ impl WeightInfo for SubstrateWeight { fn execute_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `209` - // Estimated: `7230` - // Minimum execution time: 14_046_000 picoseconds. - Weight::from_parts(14_419_000, 7230) + // Estimated: `3674` + // Minimum execution time: 10_106_000 picoseconds. + Weight::from_parts(10_311_000, 3674) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -83,8 +83,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_407_000 picoseconds. - Weight::from_parts(4_556_000, 0) + // Minimum execution time: 4_227_000 picoseconds. + Weight::from_parts(4_445_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Maintenance Enabled (r:0 w:1) @@ -93,8 +93,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_868_000 picoseconds. - Weight::from_parts(6_100_000, 0) + // Minimum execution time: 4_381_000 picoseconds. + Weight::from_parts(4_576_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Preimage StatusFor (r:1 w:0) @@ -104,9 +104,9 @@ impl WeightInfo for () { fn execute_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `209` - // Estimated: `7230` - // Minimum execution time: 14_046_000 picoseconds. - Weight::from_parts(14_419_000, 7230) + // Estimated: `3674` + // Minimum execution time: 10_106_000 picoseconds. + Weight::from_parts(10_311_000, 3674) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index 5c30ca55dc..a98070041c 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -5,7 +5,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 6_680_000 picoseconds. - Weight::from_parts(6_910_000, 3530) + // Minimum execution time: 14_096_000 picoseconds. + Weight::from_parts(14_490_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,10 +87,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 2_120_000 picoseconds. - Weight::from_parts(2_190_000, 3530) - // Standard Error: 1_175 - .saturating_add(Weight::from_parts(3_053_846, 0).saturating_mul(b.into())) + // Minimum execution time: 4_157_000 picoseconds. 
+ Weight::from_parts(2_679_830, 3530) + // Standard Error: 3_040 + .saturating_add(Weight::from_parts(4_359_013, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -108,10 +108,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_210_000 picoseconds. - Weight::from_parts(2_280_000, 3481) - // Standard Error: 1_618 - .saturating_add(Weight::from_parts(4_308_375, 0).saturating_mul(b.into())) + // Minimum execution time: 4_271_000 picoseconds. + Weight::from_parts(4_483_000, 3481) + // Standard Error: 2_384 + .saturating_add(Weight::from_parts(5_835_967, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -136,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 13_270_000 picoseconds. - Weight::from_parts(13_559_000, 3530) + // Minimum execution time: 23_512_000 picoseconds. + Weight::from_parts(24_018_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 16_680_000 picoseconds. - Weight::from_parts(17_260_000, 3530) + // Minimum execution time: 29_808_000 picoseconds. + Weight::from_parts(30_221_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 16_630_000 picoseconds. - Weight::from_parts(17_030_000, 5874) - // Standard Error: 137_617 - .saturating_add(Weight::from_parts(47_437_920, 0).saturating_mul(b.into())) + // Minimum execution time: 29_778_000 picoseconds. + Weight::from_parts(30_144_000, 5874) + // Standard Error: 211_038 + .saturating_add(Weight::from_parts(71_816_636, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -207,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 10_200_000 picoseconds. - Weight::from_parts(10_490_000, 6070) + // Minimum execution time: 17_854_000 picoseconds. + Weight::from_parts(18_244_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 5_730_000 picoseconds. - Weight::from_parts(5_980_000, 3522) + // Minimum execution time: 10_545_000 picoseconds. 
+ Weight::from_parts(10_842_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 5_760_000 picoseconds. - Weight::from_parts(5_990_000, 3522) + // Minimum execution time: 10_536_000 picoseconds. + Weight::from_parts(10_811_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 3_270_000 picoseconds. - Weight::from_parts(3_440_000, 3522) + // Minimum execution time: 5_351_000 picoseconds. + Weight::from_parts(5_501_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 16_770_000 picoseconds. - Weight::from_parts(17_150_000, 3530) + // Minimum execution time: 28_847_000 picoseconds. + Weight::from_parts(29_327_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -278,10 +278,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 870_000 picoseconds. - Weight::from_parts(910_000, 20191) - // Standard Error: 37_993 - .saturating_add(Weight::from_parts(8_947_342, 0).saturating_mul(b.into())) + // Minimum execution time: 2_006_000 picoseconds. + Weight::from_parts(2_103_000, 20191) + // Standard Error: 46_810 + .saturating_add(Weight::from_parts(11_403_774, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 2_570_000 picoseconds. - Weight::from_parts(10_787_333, 36269) - // Standard Error: 7_619 - .saturating_add(Weight::from_parts(2_939_955, 0).saturating_mul(b.into())) + // Minimum execution time: 5_038_000 picoseconds. + Weight::from_parts(11_734_155, 36269) + // Standard Error: 13_521 + .saturating_add(Weight::from_parts(5_311_979, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -312,10 +312,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 530_000 picoseconds. - Weight::from_parts(5_572_362, 20191) - // Standard Error: 6_997 - .saturating_add(Weight::from_parts(2_986_451, 0).saturating_mul(b.into())) + // Minimum execution time: 1_599_000 picoseconds. + Weight::from_parts(1_659_000, 20191) + // Standard Error: 77_681 + .saturating_add(Weight::from_parts(6_883_549, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +330,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 2_580_000 picoseconds. 
- Weight::from_parts(2_640_000, 36269) - // Standard Error: 25_713 - .saturating_add(Weight::from_parts(9_667_974, 0).saturating_mul(b.into())) + // Minimum execution time: 4_916_000 picoseconds. + Weight::from_parts(5_018_000, 36269) + // Standard Error: 84_636 + .saturating_add(Weight::from_parts(24_588_007, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,8 +343,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_910_000 picoseconds. - Weight::from_parts(3_020_000, 3522) + // Minimum execution time: 4_667_000 picoseconds. + Weight::from_parts(4_879_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -353,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_510_000 picoseconds. - Weight::from_parts(2_660_000, 0) + // Minimum execution time: 6_087_000 picoseconds. + Weight::from_parts(6_270_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -363,8 +363,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_860_000 picoseconds. - Weight::from_parts(2_010_000, 3576) + // Minimum execution time: 3_563_000 picoseconds. + Weight::from_parts(3_683_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -373,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 2_220_000 picoseconds. - Weight::from_parts(2_350_000, 36269) + // Minimum execution time: 4_005_000 picoseconds. + Weight::from_parts(4_191_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -394,8 +394,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 6_680_000 picoseconds. - Weight::from_parts(6_910_000, 3530) + // Minimum execution time: 14_096_000 picoseconds. + Weight::from_parts(14_490_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -412,10 +412,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 2_120_000 picoseconds. - Weight::from_parts(2_190_000, 3530) - // Standard Error: 1_175 - .saturating_add(Weight::from_parts(3_053_846, 0).saturating_mul(b.into())) + // Minimum execution time: 4_157_000 picoseconds. + Weight::from_parts(2_679_830, 3530) + // Standard Error: 3_040 + .saturating_add(Weight::from_parts(4_359_013, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -433,10 +433,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_210_000 picoseconds. 
- Weight::from_parts(2_280_000, 3481) - // Standard Error: 1_618 - .saturating_add(Weight::from_parts(4_308_375, 0).saturating_mul(b.into())) + // Minimum execution time: 4_271_000 picoseconds. + Weight::from_parts(4_483_000, 3481) + // Standard Error: 2_384 + .saturating_add(Weight::from_parts(5_835_967, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -461,8 +461,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 13_270_000 picoseconds. - Weight::from_parts(13_559_000, 3530) + // Minimum execution time: 23_512_000 picoseconds. + Weight::from_parts(24_018_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -484,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 16_680_000 picoseconds. - Weight::from_parts(17_260_000, 3530) + // Minimum execution time: 29_808_000 picoseconds. + Weight::from_parts(30_221_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -510,10 +510,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 16_630_000 picoseconds. - Weight::from_parts(17_030_000, 5874) - // Standard Error: 137_617 - .saturating_add(Weight::from_parts(47_437_920, 0).saturating_mul(b.into())) + // Minimum execution time: 29_778_000 picoseconds. + Weight::from_parts(30_144_000, 5874) + // Standard Error: 211_038 + .saturating_add(Weight::from_parts(71_816_636, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -532,8 +532,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 10_200_000 picoseconds. - Weight::from_parts(10_490_000, 6070) + // Minimum execution time: 17_854_000 picoseconds. + Weight::from_parts(18_244_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -545,8 +545,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 5_730_000 picoseconds. - Weight::from_parts(5_980_000, 3522) + // Minimum execution time: 10_545_000 picoseconds. + Weight::from_parts(10_842_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -558,8 +558,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 5_760_000 picoseconds. - Weight::from_parts(5_990_000, 3522) + // Minimum execution time: 10_536_000 picoseconds. + Weight::from_parts(10_811_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -569,8 +569,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 3_270_000 picoseconds. - Weight::from_parts(3_440_000, 3522) + // Minimum execution time: 5_351_000 picoseconds. 
+ Weight::from_parts(5_501_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -591,8 +591,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 16_770_000 picoseconds. - Weight::from_parts(17_150_000, 3530) + // Minimum execution time: 28_847_000 picoseconds. + Weight::from_parts(29_327_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -603,10 +603,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 870_000 picoseconds. - Weight::from_parts(910_000, 20191) - // Standard Error: 37_993 - .saturating_add(Weight::from_parts(8_947_342, 0).saturating_mul(b.into())) + // Minimum execution time: 2_006_000 picoseconds. + Weight::from_parts(2_103_000, 20191) + // Standard Error: 46_810 + .saturating_add(Weight::from_parts(11_403_774, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -621,10 +621,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 2_570_000 picoseconds. - Weight::from_parts(10_787_333, 36269) - // Standard Error: 7_619 - .saturating_add(Weight::from_parts(2_939_955, 0).saturating_mul(b.into())) + // Minimum execution time: 5_038_000 picoseconds. + Weight::from_parts(11_734_155, 36269) + // Standard Error: 13_521 + .saturating_add(Weight::from_parts(5_311_979, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -637,10 +637,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 530_000 picoseconds. - Weight::from_parts(5_572_362, 20191) - // Standard Error: 6_997 - .saturating_add(Weight::from_parts(2_986_451, 0).saturating_mul(b.into())) + // Minimum execution time: 1_599_000 picoseconds. + Weight::from_parts(1_659_000, 20191) + // Standard Error: 77_681 + .saturating_add(Weight::from_parts(6_883_549, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -655,10 +655,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 2_580_000 picoseconds. - Weight::from_parts(2_640_000, 36269) - // Standard Error: 25_713 - .saturating_add(Weight::from_parts(9_667_974, 0).saturating_mul(b.into())) + // Minimum execution time: 4_916_000 picoseconds. + Weight::from_parts(5_018_000, 36269) + // Standard Error: 84_636 + .saturating_add(Weight::from_parts(24_588_007, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -668,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_910_000 picoseconds. - Weight::from_parts(3_020_000, 3522) + // Minimum execution time: 4_667_000 picoseconds. 
+ Weight::from_parts(4_879_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -678,8 +678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_510_000 picoseconds. - Weight::from_parts(2_660_000, 0) + // Minimum execution time: 6_087_000 picoseconds. + Weight::from_parts(6_270_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -688,8 +688,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_860_000 picoseconds. - Weight::from_parts(2_010_000, 3576) + // Minimum execution time: 3_563_000 picoseconds. + Weight::from_parts(3_683_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -698,8 +698,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 2_220_000 picoseconds. - Weight::from_parts(2_350_000, 36269) + // Minimum execution time: 4_005_000 picoseconds. + Weight::from_parts(4_191_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index db0fb85d90..52e1f1fa58 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -5,7 +5,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 7_490_000 picoseconds. - Weight::from_parts(7_820_000, 3530) + // Minimum execution time: 16_590_000 picoseconds. + Weight::from_parts(17_009_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -98,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_570_000 picoseconds. - Weight::from_parts(1_630_000, 3530) - // Standard Error: 1_544 - .saturating_add(Weight::from_parts(4_082_425, 0).saturating_mul(b.into())) + // Minimum execution time: 3_775_000 picoseconds. + Weight::from_parts(3_879_000, 3530) + // Standard Error: 3_161 + .saturating_add(Weight::from_parts(5_906_642, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -121,10 +121,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_620_000 picoseconds. - Weight::from_parts(1_690_000, 3481) - // Standard Error: 1_349 - .saturating_add(Weight::from_parts(5_124_357, 0).saturating_mul(b.into())) + // Minimum execution time: 3_776_000 picoseconds. 
+ Weight::from_parts(3_877_000, 3481) + // Standard Error: 2_805 + .saturating_add(Weight::from_parts(7_369_476, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -146,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_570_000 picoseconds. - Weight::from_parts(2_680_000, 3481) - // Standard Error: 637 - .saturating_add(Weight::from_parts(3_753_594, 0).saturating_mul(b.into())) + // Minimum execution time: 6_070_000 picoseconds. + Weight::from_parts(5_715_254, 3481) + // Standard Error: 3_252 + .saturating_add(Weight::from_parts(5_385_888, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -168,8 +168,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 16_200_000 picoseconds. - Weight::from_parts(16_620_000, 8682) + // Minimum execution time: 28_877_000 picoseconds. + Weight::from_parts(29_326_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 15_820_000 picoseconds. - Weight::from_parts(16_410_000, 3554) + // Minimum execution time: 27_570_000 picoseconds. + Weight::from_parts(28_241_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -202,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 9_970_000 picoseconds. - Weight::from_parts(10_250_000, 6118) + // Minimum execution time: 17_508_000 picoseconds. + Weight::from_parts(17_819_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 11_530_000 picoseconds. - Weight::from_parts(11_800_000, 6118) + // Minimum execution time: 20_442_000 picoseconds. + Weight::from_parts(20_782_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 13_520_000 picoseconds. - Weight::from_parts(13_860_000, 6118) + // Minimum execution time: 23_151_000 picoseconds. + Weight::from_parts(23_518_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 13_290_000 picoseconds. - Weight::from_parts(13_590_000, 6118) + // Minimum execution time: 22_861_000 picoseconds. 
+ Weight::from_parts(23_178_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 5_910_000 picoseconds. - Weight::from_parts(6_070_000, 3554) + // Minimum execution time: 11_395_000 picoseconds. + Weight::from_parts(11_844_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,8 +279,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 5_920_000 picoseconds. - Weight::from_parts(6_100_000, 3554) + // Minimum execution time: 11_977_000 picoseconds. + Weight::from_parts(12_217_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 14_460_000 picoseconds. - Weight::from_parts(14_900_000, 6118) + // Minimum execution time: 25_243_000 picoseconds. + Weight::from_parts(26_153_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -313,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 15_920_000 picoseconds. - Weight::from_parts(16_340_000, 6118) + // Minimum execution time: 28_042_000 picoseconds. + Weight::from_parts(28_499_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -332,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 18_010_000 picoseconds. - Weight::from_parts(18_350_000, 6118) + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(30_914_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -351,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 17_800_000 picoseconds. - Weight::from_parts(18_160_000, 6118) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(30_892_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -374,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 21_020_000 picoseconds. - Weight::from_parts(21_450_000, 3570) + // Minimum execution time: 35_479_000 picoseconds. + Weight::from_parts(35_928_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -386,10 +386,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 790_000 picoseconds. - Weight::from_parts(840_000, 20191) - // Standard Error: 35_436 - .saturating_add(Weight::from_parts(8_505_258, 0).saturating_mul(b.into())) + // Minimum execution time: 2_013_000 picoseconds. 
+ Weight::from_parts(2_086_000, 20191) + // Standard Error: 45_248 + .saturating_add(Weight::from_parts(11_351_115, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -404,10 +404,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(5_892_635, 36269) - // Standard Error: 5_857 - .saturating_add(Weight::from_parts(2_956_238, 0).saturating_mul(b.into())) + // Minimum execution time: 3_998_000 picoseconds. + Weight::from_parts(10_420_739, 36269) + // Standard Error: 40_334 + .saturating_add(Weight::from_parts(5_989_594, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -420,10 +420,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 500_000 picoseconds. - Weight::from_parts(7_856_522, 20191) - // Standard Error: 7_896 - .saturating_add(Weight::from_parts(2_770_333, 0).saturating_mul(b.into())) + // Minimum execution time: 1_541_000 picoseconds. + Weight::from_parts(4_545_643, 20191) + // Standard Error: 33_290 + .saturating_add(Weight::from_parts(5_626_340, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -438,10 +438,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(1_790_000, 36269) - // Standard Error: 23_937 - .saturating_add(Weight::from_parts(9_102_744, 0).saturating_mul(b.into())) + // Minimum execution time: 3_768_000 picoseconds. + Weight::from_parts(3_906_000, 36269) + // Standard Error: 97_278 + .saturating_add(Weight::from_parts(24_661_474, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -453,8 +453,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 7_540_000 picoseconds. - Weight::from_parts(7_740_000, 3554) + // Minimum execution time: 13_554_000 picoseconds. + Weight::from_parts(13_880_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -464,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_230_000, 6118) + // Minimum execution time: 6_821_000 picoseconds. + Weight::from_parts(7_027_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -474,8 +474,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_560_000 picoseconds. - Weight::from_parts(2_680_000, 0) + // Minimum execution time: 6_084_000 picoseconds. 
+ Weight::from_parts(6_285_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -484,8 +484,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_430_000 picoseconds. - Weight::from_parts(1_510_000, 3576) + // Minimum execution time: 2_938_000 picoseconds. + Weight::from_parts(3_031_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -494,8 +494,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_380_000 picoseconds. - Weight::from_parts(1_470_000, 36269) + // Minimum execution time: 2_892_000 picoseconds. + Weight::from_parts(3_056_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -517,8 +517,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 7_490_000 picoseconds. - Weight::from_parts(7_820_000, 3530) + // Minimum execution time: 16_590_000 picoseconds. + Weight::from_parts(17_009_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -537,10 +537,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_570_000 picoseconds. - Weight::from_parts(1_630_000, 3530) - // Standard Error: 1_544 - .saturating_add(Weight::from_parts(4_082_425, 0).saturating_mul(b.into())) + // Minimum execution time: 3_775_000 picoseconds. + Weight::from_parts(3_879_000, 3530) + // Standard Error: 3_161 + .saturating_add(Weight::from_parts(5_906_642, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -560,10 +560,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_620_000 picoseconds. - Weight::from_parts(1_690_000, 3481) - // Standard Error: 1_349 - .saturating_add(Weight::from_parts(5_124_357, 0).saturating_mul(b.into())) + // Minimum execution time: 3_776_000 picoseconds. + Weight::from_parts(3_877_000, 3481) + // Standard Error: 2_805 + .saturating_add(Weight::from_parts(7_369_476, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -585,10 +585,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_570_000 picoseconds. - Weight::from_parts(2_680_000, 3481) - // Standard Error: 637 - .saturating_add(Weight::from_parts(3_753_594, 0).saturating_mul(b.into())) + // Minimum execution time: 6_070_000 picoseconds. 
+ Weight::from_parts(5_715_254, 3481) + // Standard Error: 3_252 + .saturating_add(Weight::from_parts(5_385_888, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -607,8 +607,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 16_200_000 picoseconds. - Weight::from_parts(16_620_000, 8682) + // Minimum execution time: 28_877_000 picoseconds. + Weight::from_parts(29_326_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -628,8 +628,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 15_820_000 picoseconds. - Weight::from_parts(16_410_000, 3554) + // Minimum execution time: 27_570_000 picoseconds. + Weight::from_parts(28_241_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -641,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 9_970_000 picoseconds. - Weight::from_parts(10_250_000, 6118) + // Minimum execution time: 17_508_000 picoseconds. + Weight::from_parts(17_819_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -658,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 11_530_000 picoseconds. - Weight::from_parts(11_800_000, 6118) + // Minimum execution time: 20_442_000 picoseconds. + Weight::from_parts(20_782_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -675,8 +675,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 13_520_000 picoseconds. - Weight::from_parts(13_860_000, 6118) + // Minimum execution time: 23_151_000 picoseconds. + Weight::from_parts(23_518_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -692,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 13_290_000 picoseconds. - Weight::from_parts(13_590_000, 6118) + // Minimum execution time: 22_861_000 picoseconds. + Weight::from_parts(23_178_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -705,8 +705,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 5_910_000 picoseconds. - Weight::from_parts(6_070_000, 3554) + // Minimum execution time: 11_395_000 picoseconds. + Weight::from_parts(11_844_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -718,8 +718,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 5_920_000 picoseconds. - Weight::from_parts(6_100_000, 3554) + // Minimum execution time: 11_977_000 picoseconds. 
+ Weight::from_parts(12_217_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -733,8 +733,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 14_460_000 picoseconds. - Weight::from_parts(14_900_000, 6118) + // Minimum execution time: 25_243_000 picoseconds. + Weight::from_parts(26_153_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -752,8 +752,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 15_920_000 picoseconds. - Weight::from_parts(16_340_000, 6118) + // Minimum execution time: 28_042_000 picoseconds. + Weight::from_parts(28_499_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -771,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 18_010_000 picoseconds. - Weight::from_parts(18_350_000, 6118) + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(30_914_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -790,8 +790,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 17_800_000 picoseconds. - Weight::from_parts(18_160_000, 6118) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(30_892_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -813,8 +813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 21_020_000 picoseconds. - Weight::from_parts(21_450_000, 3570) + // Minimum execution time: 35_479_000 picoseconds. + Weight::from_parts(35_928_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -825,10 +825,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 790_000 picoseconds. - Weight::from_parts(840_000, 20191) - // Standard Error: 35_436 - .saturating_add(Weight::from_parts(8_505_258, 0).saturating_mul(b.into())) + // Minimum execution time: 2_013_000 picoseconds. + Weight::from_parts(2_086_000, 20191) + // Standard Error: 45_248 + .saturating_add(Weight::from_parts(11_351_115, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -843,10 +843,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(5_892_635, 36269) - // Standard Error: 5_857 - .saturating_add(Weight::from_parts(2_956_238, 0).saturating_mul(b.into())) + // Minimum execution time: 3_998_000 picoseconds. 
+ Weight::from_parts(10_420_739, 36269) + // Standard Error: 40_334 + .saturating_add(Weight::from_parts(5_989_594, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,10 +859,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 500_000 picoseconds. - Weight::from_parts(7_856_522, 20191) - // Standard Error: 7_896 - .saturating_add(Weight::from_parts(2_770_333, 0).saturating_mul(b.into())) + // Minimum execution time: 1_541_000 picoseconds. + Weight::from_parts(4_545_643, 20191) + // Standard Error: 33_290 + .saturating_add(Weight::from_parts(5_626_340, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -877,10 +877,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(1_790_000, 36269) - // Standard Error: 23_937 - .saturating_add(Weight::from_parts(9_102_744, 0).saturating_mul(b.into())) + // Minimum execution time: 3_768_000 picoseconds. + Weight::from_parts(3_906_000, 36269) + // Standard Error: 97_278 + .saturating_add(Weight::from_parts(24_661_474, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -892,8 +892,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 7_540_000 picoseconds. - Weight::from_parts(7_740_000, 3554) + // Minimum execution time: 13_554_000 picoseconds. + Weight::from_parts(13_880_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -903,8 +903,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_230_000, 6118) + // Minimum execution time: 6_821_000 picoseconds. + Weight::from_parts(7_027_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -913,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_560_000 picoseconds. - Weight::from_parts(2_680_000, 0) + // Minimum execution time: 6_084_000 picoseconds. + Weight::from_parts(6_285_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -923,8 +923,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_430_000 picoseconds. - Weight::from_parts(1_510_000, 3576) + // Minimum execution time: 2_938_000 picoseconds. + Weight::from_parts(3_031_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -933,8 +933,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_380_000 picoseconds. - Weight::from_parts(1_470_000, 36269) + // Minimum execution time: 2_892_000 picoseconds. 
+ Weight::from_parts(3_056_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/structure/src/weights.rs b/pallets/structure/src/weights.rs index b871bae467..b0f9e385b6 100644 --- a/pallets/structure/src/weights.rs +++ b/pallets/structure/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_structure //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -45,10 +45,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) fn find_parent() -> Weight { // Proof Size summary in bytes: - // Measured: `634` - // Estimated: `7847` - // Minimum execution time: 10_781_000 picoseconds. - Weight::from_parts(11_675_000, 7847) + // Measured: `667` + // Estimated: `4325` + // Minimum execution time: 9_310_000 picoseconds. + Weight::from_parts(9_551_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -61,10 +61,10 @@ impl WeightInfo for () { /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) fn find_parent() -> Weight { // Proof Size summary in bytes: - // Measured: `634` - // Estimated: `7847` - // Minimum execution time: 10_781_000 picoseconds. - Weight::from_parts(11_675_000, 7847) + // Measured: `667` + // Estimated: `4325` + // Minimum execution time: 9_310_000 picoseconds. + Weight::from_parts(9_551_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/unique/src/weights.rs b/pallets/unique/src/weights.rs index f13241f32b..fcca221500 100644 --- a/pallets/unique/src/weights.rs +++ b/pallets/unique/src/weights.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_unique //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -57,6 +57,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: System Account (r:2 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Common AdminAmount (r:0 w:1) + /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:0 w:1) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Common CollectionProperties (r:0 w:1) @@ -66,11 +68,11 @@ impl WeightInfo for SubstrateWeight { fn create_collection() -> Weight { // Proof Size summary in bytes: // Measured: `245` - // Estimated: `9174` - // Minimum execution time: 31_198_000 picoseconds. 
- Weight::from_parts(32_046_000, 9174) + // Estimated: `6196` + // Minimum execution time: 32_963_000 picoseconds. + Weight::from_parts(33_785_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: Common CollectionById (r:1 w:1) /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) @@ -88,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) fn destroy_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1086` - // Estimated: `9336` - // Minimum execution time: 48_208_000 picoseconds. - Weight::from_parts(49_031_000, 9336) + // Measured: `1200` + // Estimated: `4325` + // Minimum execution time: 46_962_000 picoseconds. + Weight::from_parts(47_997_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -101,10 +103,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn add_to_allow_list() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_852_000 picoseconds. - Weight::from_parts(15_268_000, 4325) + // Minimum execution time: 13_657_000 picoseconds. + Weight::from_parts(13_870_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -114,10 +116,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn remove_from_allow_list() -> Weight { // Proof Size summary in bytes: - // Measured: `1000` + // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 14_595_000 picoseconds. - Weight::from_parts(14_933_000, 4325) + // Minimum execution time: 13_162_000 picoseconds. + Weight::from_parts(13_458_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -125,10 +127,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn change_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_132_000 picoseconds. - Weight::from_parts(14_501_000, 4325) + // Minimum execution time: 12_614_000 picoseconds. + Weight::from_parts(12_968_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,10 +142,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) fn add_collection_admin() -> Weight { // Proof Size summary in bytes: - // Measured: `967` - // Estimated: `11349` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(17_657_000, 11349) + // Measured: `1012` + // Estimated: `4325` + // Minimum execution time: 16_794_000 picoseconds. 
+ Weight::from_parts(17_161_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -156,9 +158,9 @@ impl WeightInfo for SubstrateWeight { fn remove_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1107` - // Estimated: `11349` - // Minimum execution time: 19_827_000 picoseconds. - Weight::from_parts(20_479_000, 11349) + // Estimated: `4325` + // Minimum execution time: 18_145_000 picoseconds. + Weight::from_parts(18_527_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -166,10 +168,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_collection_sponsor() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_049_000 picoseconds. - Weight::from_parts(14_420_000, 4325) + // Minimum execution time: 12_649_000 picoseconds. + Weight::from_parts(12_953_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -177,10 +179,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn confirm_sponsorship() -> Weight { // Proof Size summary in bytes: - // Measured: `999` + // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 13_689_000 picoseconds. - Weight::from_parts(14_044_000, 4325) + // Minimum execution time: 12_310_000 picoseconds. + Weight::from_parts(12_578_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -188,10 +190,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn remove_collection_sponsor() -> Weight { // Proof Size summary in bytes: - // Measured: `999` + // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 13_275_000 picoseconds. - Weight::from_parts(13_598_000, 4325) + // Minimum execution time: 12_312_000 picoseconds. + Weight::from_parts(12_567_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +201,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_transfers_enabled_flag() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_411_000 picoseconds. - Weight::from_parts(9_706_000, 4325) + // Minimum execution time: 8_315_000 picoseconds. + Weight::from_parts(8_489_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -210,10 +212,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_collection_limits() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_864_000 picoseconds. - Weight::from_parts(14_368_000, 4325) + // Minimum execution time: 13_135_000 picoseconds. 
+ Weight::from_parts(13_484_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,10 +223,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) fn force_repair_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `265` + // Measured: `298` // Estimated: `44457` - // Minimum execution time: 7_104_000 picoseconds. - Weight::from_parts(7_293_000, 44457) + // Minimum execution time: 6_462_000 picoseconds. + Weight::from_parts(6_664_000, 44457) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,6 +240,8 @@ impl WeightInfo for () { /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) /// Storage: System Account (r:2 w:2) /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Common AdminAmount (r:0 w:1) + /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:0 w:1) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Common CollectionProperties (r:0 w:1) @@ -247,11 +251,11 @@ impl WeightInfo for () { fn create_collection() -> Weight { // Proof Size summary in bytes: // Measured: `245` - // Estimated: `9174` - // Minimum execution time: 31_198_000 picoseconds. - Weight::from_parts(32_046_000, 9174) + // Estimated: `6196` + // Minimum execution time: 32_963_000 picoseconds. + Weight::from_parts(33_785_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: Common CollectionById (r:1 w:1) /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) @@ -269,10 +273,10 @@ impl WeightInfo for () { /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) fn destroy_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `1086` - // Estimated: `9336` - // Minimum execution time: 48_208_000 picoseconds. - Weight::from_parts(49_031_000, 9336) + // Measured: `1200` + // Estimated: `4325` + // Minimum execution time: 46_962_000 picoseconds. + Weight::from_parts(47_997_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -282,10 +286,10 @@ impl WeightInfo for () { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn add_to_allow_list() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_852_000 picoseconds. - Weight::from_parts(15_268_000, 4325) + // Minimum execution time: 13_657_000 picoseconds. 
+ Weight::from_parts(13_870_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -295,10 +299,10 @@ impl WeightInfo for () { /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) fn remove_from_allow_list() -> Weight { // Proof Size summary in bytes: - // Measured: `1000` + // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 14_595_000 picoseconds. - Weight::from_parts(14_933_000, 4325) + // Minimum execution time: 13_162_000 picoseconds. + Weight::from_parts(13_458_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -306,10 +310,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn change_collection_owner() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_132_000 picoseconds. - Weight::from_parts(14_501_000, 4325) + // Minimum execution time: 12_614_000 picoseconds. + Weight::from_parts(12_968_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -321,10 +325,10 @@ impl WeightInfo for () { /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) fn add_collection_admin() -> Weight { // Proof Size summary in bytes: - // Measured: `967` - // Estimated: `11349` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(17_657_000, 11349) + // Measured: `1012` + // Estimated: `4325` + // Minimum execution time: 16_794_000 picoseconds. + Weight::from_parts(17_161_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -337,9 +341,9 @@ impl WeightInfo for () { fn remove_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1107` - // Estimated: `11349` - // Minimum execution time: 19_827_000 picoseconds. - Weight::from_parts(20_479_000, 11349) + // Estimated: `4325` + // Minimum execution time: 18_145_000 picoseconds. + Weight::from_parts(18_527_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -347,10 +351,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_collection_sponsor() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 14_049_000 picoseconds. - Weight::from_parts(14_420_000, 4325) + // Minimum execution time: 12_649_000 picoseconds. + Weight::from_parts(12_953_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -358,10 +362,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn confirm_sponsorship() -> Weight { // Proof Size summary in bytes: - // Measured: `999` + // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 13_689_000 picoseconds. - Weight::from_parts(14_044_000, 4325) + // Minimum execution time: 12_310_000 picoseconds. 
+ Weight::from_parts(12_578_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -369,10 +373,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn remove_collection_sponsor() -> Weight { // Proof Size summary in bytes: - // Measured: `999` + // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 13_275_000 picoseconds. - Weight::from_parts(13_598_000, 4325) + // Minimum execution time: 12_312_000 picoseconds. + Weight::from_parts(12_567_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -380,10 +384,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_transfers_enabled_flag() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_411_000 picoseconds. - Weight::from_parts(9_706_000, 4325) + // Minimum execution time: 8_315_000 picoseconds. + Weight::from_parts(8_489_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,10 +395,10 @@ impl WeightInfo for () { /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) fn set_collection_limits() -> Weight { // Proof Size summary in bytes: - // Measured: `967` + // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_864_000 picoseconds. - Weight::from_parts(14_368_000, 4325) + // Minimum execution time: 13_135_000 picoseconds. + Weight::from_parts(13_484_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -402,10 +406,10 @@ impl WeightInfo for () { /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) fn force_repair_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `265` + // Measured: `298` // Estimated: `44457` - // Minimum execution time: 7_104_000 picoseconds. - Weight::from_parts(7_293_000, 44457) + // Minimum execution time: 6_462_000 picoseconds. + Weight::from_parts(6_664_000, 44457) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/runtime/common/weights/xcm.rs b/runtime/common/weights/xcm.rs index fb48693c6c..7d1dd12a73 100644 --- a/runtime/common/weights/xcm.rs +++ b/runtime/common/weights/xcm.rs @@ -3,7 +3,7 @@ //! Autogenerated weights for pallet_xcm //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-20, STEPS: `50`, REPEAT: 80, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: 80, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 @@ -47,10 +47,10 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `211` - // Estimated: `10460` - // Minimum execution time: 17_089_000 picoseconds. - Weight::from_parts(17_615_000, 10460) + // Measured: `278` + // Estimated: `3743` + // Minimum execution time: 16_743_000 picoseconds. 
+ Weight::from_parts(17_221_000, 3743) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -58,28 +58,28 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `136` + // Measured: `169` // Estimated: `1489` - // Minimum execution time: 14_443_000 picoseconds. - Weight::from_parts(14_895_000, 1489) + // Minimum execution time: 13_575_000 picoseconds. + Weight::from_parts(13_972_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: ParachainInfo ParachainId (r:1 w:0) /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `136` + // Measured: `169` // Estimated: `1489` - // Minimum execution time: 14_340_000 picoseconds. - Weight::from_parts(14_748_000, 1489) + // Minimum execution time: 13_540_000 picoseconds. + Weight::from_parts(13_855_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_266_000 picoseconds. - Weight::from_parts(5_430_000, 0) + // Minimum execution time: 5_101_000 picoseconds. + Weight::from_parts(5_262_000, 0) } /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) @@ -87,8 +87,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_621_000 picoseconds. - Weight::from_parts(5_888_000, 0) + // Minimum execution time: 5_433_000 picoseconds. + Weight::from_parts(5_609_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) @@ -97,8 +97,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_087_000 picoseconds. - Weight::from_parts(2_218_000, 0) + // Minimum execution time: 1_748_000 picoseconds. + Weight::from_parts(1_870_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) @@ -119,10 +119,10 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `211` - // Estimated: `16043` - // Minimum execution time: 21_067_000 picoseconds. - Weight::from_parts(21_466_000, 16043) + // Measured: `278` + // Estimated: `3743` + // Minimum execution time: 20_053_000 picoseconds. + Weight::from_parts(20_382_000, 3743) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -142,21 +142,31 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `394` - // Estimated: `15628` - // Minimum execution time: 23_986_000 picoseconds. - Weight::from_parts(25_328_000, 15628) + // Measured: `461` + // Estimated: `3926` + // Minimum execution time: 22_404_000 picoseconds. 
+ Weight::from_parts(22_801_000, 3926) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } + /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) + /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_839_000 picoseconds. + Weight::from_parts(1_954_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `131` - // Estimated: `11021` - // Minimum execution time: 15_073_000 picoseconds. - Weight::from_parts(15_451_000, 11021) + // Measured: `196` + // Estimated: `11086` + // Minimum execution time: 14_147_000 picoseconds. + Weight::from_parts(14_492_000, 11086) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -164,10 +174,10 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `135` - // Estimated: `11025` - // Minimum execution time: 14_840_000 picoseconds. - Weight::from_parts(15_347_000, 11025) + // Measured: `200` + // Estimated: `11090` + // Minimum execution time: 14_046_000 picoseconds. + Weight::from_parts(14_424_000, 11090) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -175,10 +185,10 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `13507` - // Minimum execution time: 16_215_000 picoseconds. - Weight::from_parts(16_461_000, 13507) + // Measured: `207` + // Estimated: `13572` + // Minimum execution time: 15_314_000 picoseconds. + Weight::from_parts(15_624_000, 13572) .saturating_add(T::DbWeight::get().reads(5_u64)) } /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) @@ -195,10 +205,10 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `278` - // Estimated: `17013` - // Minimum execution time: 21_705_000 picoseconds. - Weight::from_parts(22_313_000, 17013) + // Measured: `345` + // Estimated: `6285` + // Minimum execution time: 20_624_000 picoseconds. + Weight::from_parts(20_928_000, 6285) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -206,20 +216,20 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 7_869_000 picoseconds. - Weight::from_parts(8_052_000, 8587) + // Measured: `239` + // Estimated: `8654` + // Minimum execution time: 7_429_000 picoseconds. 
+ Weight::from_parts(7_661_000, 8654) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `11032` - // Minimum execution time: 15_340_000 picoseconds. - Weight::from_parts(15_738_000, 11032) + // Measured: `207` + // Estimated: `11097` + // Minimum execution time: 14_558_000 picoseconds. + Weight::from_parts(14_877_000, 11097) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -237,16 +247,12 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `284` - // Estimated: `21999` - // Minimum execution time: 27_809_000 picoseconds. - Weight::from_parts(28_290_000, 21999) + // Measured: `349` + // Estimated: `11239` + // Minimum execution time: 25_587_000 picoseconds. + Weight::from_parts(26_111_000, 11239) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - - fn force_suspension() -> Weight { - Default::default() - } } From cb1a9d813685052def7846f43c6ff1edc4f1b0f2 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 15:17:01 +0200 Subject: [PATCH 043/143] fix: calibrate weights --- primitives/common/src/constants.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index 3428c8df3c..5ae2b3e6f1 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -52,10 +52,10 @@ pub const MAX_COLLATORS: u32 = 10; pub const SESSION_LENGTH: BlockNumber = HOURS; // Targeting 0.1 UNQ per transfer -pub const WEIGHT_TO_FEE_COEFF: u64 = /**/76_840_511_488_584_762/**/; +pub const WEIGHT_TO_FEE_COEFF: u64 = /**/76_902_456_736_428_438/**/; // Targeting 0.15 UNQ per transfer via ETH -pub const MIN_GAS_PRICE: u64 = /**/1_906_626_161_453/**/; +pub const MIN_GAS_PRICE: u64 = /**/1_908_931_253_022/**/; /// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
From 7cc9052442745b7513abd7af94d5ba6877c717f1 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 16:06:42 +0200 Subject: [PATCH 044/143] fix: bench profile production --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 856c6adc31..e920b2b077 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,7 @@ evm_stubs: UniqueFungible UniqueNFT UniqueRefungible UniqueRefungibleToken Contr .PHONY: _bench _bench: - cargo run --release --features runtime-benchmarks,$(RUNTIME) -- \ + cargo run --profile production --features runtime-benchmarks,$(RUNTIME) -- \ benchmark pallet --pallet pallet-$(if $(PALLET),$(PALLET),$(error Must set PALLET)) \ --wasm-execution compiled --extrinsic '*' \ $(if $(TEMPLATE),$(TEMPLATE),--template=.maintain/frame-weight-template.hbs) --steps=50 --repeat=80 --heap-pages=4096 \ From 1acfc3911a326c9328d21f1cfd956edf367f8231 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 27 Sep 2023 10:04:32 +0200 Subject: [PATCH 045/143] chore: bench weights repeats 400 --- pallets/app-promotion/src/weights.rs | 102 +++--- pallets/collator-selection/src/weights.rs | 154 ++++----- pallets/common/src/weights.rs | 46 +-- pallets/configuration/src/weights.rs | 54 +-- pallets/evm-migration/src/weights.rs | 78 ++--- pallets/foreign-assets/src/weights.rs | 22 +- pallets/fungible/src/weights.rs | 86 ++--- pallets/identity/src/weights.rs | 398 +++++++++++----------- pallets/maintenance/src/weights.rs | 30 +- pallets/nonfungible/src/weights.rs | 214 ++++++------ pallets/refungible/src/weights.rs | 270 +++++++-------- pallets/structure/src/weights.rs | 14 +- pallets/unique/src/weights.rs | 110 +++--- primitives/common/src/constants.rs | 4 +- runtime/common/weights/xcm.rs | 70 ++-- 15 files changed, 826 insertions(+), 826 deletions(-) diff --git a/pallets/app-promotion/src/weights.rs b/pallets/app-promotion/src/weights.rs index ca71a10f3e..c2cfd140bd 100644 --- a/pallets/app-promotion/src/weights.rs +++ b/pallets/app-promotion/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_app_promotion //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/app-promotion/src/weights.rs @@ -63,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 5_034_000 picoseconds. - Weight::from_parts(5_845_442, 3622) - // Standard Error: 18_650 - .saturating_add(Weight::from_parts(13_172_650, 0).saturating_mul(b.into())) + // Minimum execution time: 4_107_000 picoseconds. 
+ Weight::from_parts(4_751_973, 3622) + // Standard Error: 4_668 + .saturating_add(Weight::from_parts(10_570_330, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_838_000 picoseconds. - Weight::from_parts(5_022_000, 0) + // Minimum execution time: 3_459_000 picoseconds. + Weight::from_parts(3_627_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: AppPromotion Admin (r:1 w:0) @@ -105,10 +105,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 88_317_000 picoseconds. - Weight::from_parts(3_660_713, 3593) - // Standard Error: 17_984 - .saturating_add(Weight::from_parts(58_197_541, 0).saturating_mul(b.into())) + // Minimum execution time: 73_245_000 picoseconds. + Weight::from_parts(74_196_000, 3593) + // Standard Error: 8_231 + .saturating_add(Weight::from_parts(49_090_053, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 27_232_000 picoseconds. - Weight::from_parts(27_657_000, 4764) + // Minimum execution time: 21_088_000 picoseconds. + Weight::from_parts(21_639_000, 4764) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -154,8 +154,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 49_829_000 picoseconds. - Weight::from_parts(50_668_000, 29095) + // Minimum execution time: 42_086_000 picoseconds. + Weight::from_parts(43_149_000, 29095) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -173,8 +173,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 55_678_000 picoseconds. - Weight::from_parts(56_709_000, 29095) + // Minimum execution time: 46_458_000 picoseconds. + Weight::from_parts(47_333_000, 29095) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -186,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 16_628_000 picoseconds. - Weight::from_parts(16_968_000, 4325) + // Minimum execution time: 12_827_000 picoseconds. + Weight::from_parts(13_610_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,8 +199,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 15_970_000 picoseconds. - Weight::from_parts(16_316_000, 4325) + // Minimum execution time: 11_899_000 picoseconds. 
+ Weight::from_parts(12_303_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -212,8 +212,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 13_052_000 picoseconds. - Weight::from_parts(13_555_000, 1517) + // Minimum execution time: 10_226_000 picoseconds. + Weight::from_parts(10_549_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -225,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 13_791_000 picoseconds. - Weight::from_parts(14_125_000, 3527) + // Minimum execution time: 10_528_000 picoseconds. + Weight::from_parts(10_842_000, 3527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -249,10 +249,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 5_034_000 picoseconds. - Weight::from_parts(5_845_442, 3622) - // Standard Error: 18_650 - .saturating_add(Weight::from_parts(13_172_650, 0).saturating_mul(b.into())) + // Minimum execution time: 4_107_000 picoseconds. + Weight::from_parts(4_751_973, 3622) + // Standard Error: 4_668 + .saturating_add(Weight::from_parts(10_570_330, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -264,8 +264,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_838_000 picoseconds. - Weight::from_parts(5_022_000, 0) + // Minimum execution time: 3_459_000 picoseconds. + Weight::from_parts(3_627_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: AppPromotion Admin (r:1 w:0) @@ -291,10 +291,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 88_317_000 picoseconds. - Weight::from_parts(3_660_713, 3593) - // Standard Error: 17_984 - .saturating_add(Weight::from_parts(58_197_541, 0).saturating_mul(b.into())) + // Minimum execution time: 73_245_000 picoseconds. + Weight::from_parts(74_196_000, 3593) + // Standard Error: 8_231 + .saturating_add(Weight::from_parts(49_090_053, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -321,8 +321,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 27_232_000 picoseconds. - Weight::from_parts(27_657_000, 4764) + // Minimum execution time: 21_088_000 picoseconds. + Weight::from_parts(21_639_000, 4764) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -340,8 +340,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 49_829_000 picoseconds. - Weight::from_parts(50_668_000, 29095) + // Minimum execution time: 42_086_000 picoseconds. 
+ Weight::from_parts(43_149_000, 29095) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -359,8 +359,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 55_678_000 picoseconds. - Weight::from_parts(56_709_000, 29095) + // Minimum execution time: 46_458_000 picoseconds. + Weight::from_parts(47_333_000, 29095) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -372,8 +372,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 16_628_000 picoseconds. - Weight::from_parts(16_968_000, 4325) + // Minimum execution time: 12_827_000 picoseconds. + Weight::from_parts(13_610_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -385,8 +385,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 15_970_000 picoseconds. - Weight::from_parts(16_316_000, 4325) + // Minimum execution time: 11_899_000 picoseconds. + Weight::from_parts(12_303_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -398,8 +398,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 13_052_000 picoseconds. - Weight::from_parts(13_555_000, 1517) + // Minimum execution time: 10_226_000 picoseconds. + Weight::from_parts(10_549_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -411,8 +411,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 13_791_000 picoseconds. - Weight::from_parts(14_125_000, 3527) + // Minimum execution time: 10_528_000 picoseconds. + Weight::from_parts(10_842_000, 3527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/collator-selection/src/weights.rs b/pallets/collator-selection/src/weights.rs index 8080bbed0a..c06f1f39a8 100644 --- a/pallets/collator-selection/src/weights.rs +++ b/pallets/collator-selection/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_collator_selection //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/collator-selection/src/weights.rs @@ -58,10 +58,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403 + b * (45 ±0)` // Estimated: `3873 + b * (45 ±0)` - // Minimum execution time: 13_780_000 picoseconds. 
- Weight::from_parts(14_067_943, 3873) - // Standard Error: 1_187 - .saturating_add(Weight::from_parts(168_052, 0).saturating_mul(b.into())) + // Minimum execution time: 10_975_000 picoseconds. + Weight::from_parts(11_362_608, 3873) + // Standard Error: 411 + .saturating_add(Weight::from_parts(152_014, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) @@ -73,10 +73,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 8_583_000 picoseconds. - Weight::from_parts(8_833_981, 1806) - // Standard Error: 1_399 - .saturating_add(Weight::from_parts(140_293, 0).saturating_mul(b.into())) + // Minimum execution time: 6_369_000 picoseconds. + Weight::from_parts(6_604_933, 1806) + // Standard Error: 424 + .saturating_add(Weight::from_parts(145_929, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -93,10 +93,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `668 + c * (46 ±0)` // Estimated: `4131 + c * (47 ±0)` - // Minimum execution time: 29_155_000 picoseconds. - Weight::from_parts(31_569_846, 4131) - // Standard Error: 10_912 - .saturating_add(Weight::from_parts(547_194, 0).saturating_mul(c.into())) + // Minimum execution time: 23_857_000 picoseconds. + Weight::from_parts(25_984_655, 4131) + // Standard Error: 4_364 + .saturating_add(Weight::from_parts(521_198, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) @@ -118,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `414 + c * (54 ±0)` // Estimated: `3529` - // Minimum execution time: 17_999_000 picoseconds. - Weight::from_parts(18_533_629, 3529) - // Standard Error: 3_238 - .saturating_add(Weight::from_parts(299_090, 0).saturating_mul(c.into())) + // Minimum execution time: 14_337_000 picoseconds. + Weight::from_parts(14_827_525, 3529) + // Standard Error: 1_210 + .saturating_add(Weight::from_parts(298_748, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -134,10 +134,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 9_845_000 picoseconds. - Weight::from_parts(10_209_005, 1806) - // Standard Error: 1_137 - .saturating_add(Weight::from_parts(156_275, 0).saturating_mul(c.into())) + // Minimum execution time: 7_320_000 picoseconds. + Weight::from_parts(7_646_004, 1806) + // Standard Error: 479 + .saturating_add(Weight::from_parts(160_089, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -154,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (103 ±0)` // Estimated: `3834` - // Minimum execution time: 28_700_000 picoseconds. - Weight::from_parts(29_499_805, 3834) - // Standard Error: 16_180 - .saturating_add(Weight::from_parts(880_131, 0).saturating_mul(c.into())) + // Minimum execution time: 22_821_000 picoseconds. 
+ Weight::from_parts(23_668_202, 3834) + // Standard Error: 6_654 + .saturating_add(Weight::from_parts(844_978, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -174,10 +174,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (103 ±0)` // Estimated: `3834` - // Minimum execution time: 27_941_000 picoseconds. - Weight::from_parts(28_960_442, 3834) - // Standard Error: 17_391 - .saturating_add(Weight::from_parts(885_880, 0).saturating_mul(c.into())) + // Minimum execution time: 22_462_000 picoseconds. + Weight::from_parts(23_215_875, 3834) + // Standard Error: 6_450 + .saturating_add(Weight::from_parts(830_887, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn note_author() -> Weight { // Proof Size summary in bytes: - // Measured: `157` + // Measured: `155` // Estimated: `6196` - // Minimum execution time: 22_833_000 picoseconds. - Weight::from_parts(23_223_000, 6196) + // Minimum execution time: 17_624_000 picoseconds. + Weight::from_parts(18_025_000, 6196) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -216,12 +216,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `727 + c * (84 ±0) + r * (254 ±0)` - // Estimated: `26857 + c * (2519 ±0) + r * (2844 ±4)` - // Minimum execution time: 15_283_000 picoseconds. - Weight::from_parts(15_615_000, 26857) - // Standard Error: 188_448 - .saturating_add(Weight::from_parts(15_548_718, 0).saturating_mul(c.into())) + // Measured: `725 + c * (84 ±0) + r * (254 ±0)` + // Estimated: `6196 + c * (2519 ±0) + r * (2844 ±0)` + // Minimum execution time: 11_318_000 picoseconds. + Weight::from_parts(11_615_000, 6196) + // Standard Error: 69_557 + .saturating_add(Weight::from_parts(13_016_275, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -244,10 +244,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403 + b * (45 ±0)` // Estimated: `3873 + b * (45 ±0)` - // Minimum execution time: 13_780_000 picoseconds. - Weight::from_parts(14_067_943, 3873) - // Standard Error: 1_187 - .saturating_add(Weight::from_parts(168_052, 0).saturating_mul(b.into())) + // Minimum execution time: 10_975_000 picoseconds. + Weight::from_parts(11_362_608, 3873) + // Standard Error: 411 + .saturating_add(Weight::from_parts(152_014, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) @@ -259,10 +259,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 8_583_000 picoseconds. - Weight::from_parts(8_833_981, 1806) - // Standard Error: 1_399 - .saturating_add(Weight::from_parts(140_293, 0).saturating_mul(b.into())) + // Minimum execution time: 6_369_000 picoseconds. 
+ Weight::from_parts(6_604_933, 1806) + // Standard Error: 424 + .saturating_add(Weight::from_parts(145_929, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -279,10 +279,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `668 + c * (46 ±0)` // Estimated: `4131 + c * (47 ±0)` - // Minimum execution time: 29_155_000 picoseconds. - Weight::from_parts(31_569_846, 4131) - // Standard Error: 10_912 - .saturating_add(Weight::from_parts(547_194, 0).saturating_mul(c.into())) + // Minimum execution time: 23_857_000 picoseconds. + Weight::from_parts(25_984_655, 4131) + // Standard Error: 4_364 + .saturating_add(Weight::from_parts(521_198, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) @@ -304,10 +304,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `414 + c * (54 ±0)` // Estimated: `3529` - // Minimum execution time: 17_999_000 picoseconds. - Weight::from_parts(18_533_629, 3529) - // Standard Error: 3_238 - .saturating_add(Weight::from_parts(299_090, 0).saturating_mul(c.into())) + // Minimum execution time: 14_337_000 picoseconds. + Weight::from_parts(14_827_525, 3529) + // Standard Error: 1_210 + .saturating_add(Weight::from_parts(298_748, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -320,10 +320,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 9_845_000 picoseconds. - Weight::from_parts(10_209_005, 1806) - // Standard Error: 1_137 - .saturating_add(Weight::from_parts(156_275, 0).saturating_mul(c.into())) + // Minimum execution time: 7_320_000 picoseconds. + Weight::from_parts(7_646_004, 1806) + // Standard Error: 479 + .saturating_add(Weight::from_parts(160_089, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -340,10 +340,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `328 + c * (103 ±0)` // Estimated: `3834` - // Minimum execution time: 28_700_000 picoseconds. - Weight::from_parts(29_499_805, 3834) - // Standard Error: 16_180 - .saturating_add(Weight::from_parts(880_131, 0).saturating_mul(c.into())) + // Minimum execution time: 22_821_000 picoseconds. + Weight::from_parts(23_668_202, 3834) + // Standard Error: 6_654 + .saturating_add(Weight::from_parts(844_978, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -360,10 +360,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `328 + c * (103 ±0)` // Estimated: `3834` - // Minimum execution time: 27_941_000 picoseconds. - Weight::from_parts(28_960_442, 3834) - // Standard Error: 17_391 - .saturating_add(Weight::from_parts(885_880, 0).saturating_mul(c.into())) + // Minimum execution time: 22_462_000 picoseconds. 
+ Weight::from_parts(23_215_875, 3834) + // Standard Error: 6_450 + .saturating_add(Weight::from_parts(830_887, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -375,10 +375,10 @@ impl WeightInfo for () { /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) fn note_author() -> Weight { // Proof Size summary in bytes: - // Measured: `157` + // Measured: `155` // Estimated: `6196` - // Minimum execution time: 22_833_000 picoseconds. - Weight::from_parts(23_223_000, 6196) + // Minimum execution time: 17_624_000 picoseconds. + Weight::from_parts(18_025_000, 6196) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -402,12 +402,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `727 + c * (84 ±0) + r * (254 ±0)` - // Estimated: `26857 + c * (2519 ±0) + r * (2844 ±4)` - // Minimum execution time: 15_283_000 picoseconds. - Weight::from_parts(15_615_000, 26857) - // Standard Error: 188_448 - .saturating_add(Weight::from_parts(15_548_718, 0).saturating_mul(c.into())) + // Measured: `725 + c * (84 ±0) + r * (254 ±0)` + // Estimated: `6196 + c * (2519 ±0) + r * (2844 ±0)` + // Minimum execution time: 11_318_000 picoseconds. + Weight::from_parts(11_615_000, 6196) + // Standard Error: 69_557 + .saturating_add(Weight::from_parts(13_016_275, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 6b8edb32f2..b0f12e3051 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/common/src/weights.rs @@ -48,10 +48,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_140_000 picoseconds. - Weight::from_parts(2_807_337, 44457) - // Standard Error: 15_773 - .saturating_add(Weight::from_parts(5_454_582, 0).saturating_mul(b.into())) + // Minimum execution time: 4_862_000 picoseconds. 
+ Weight::from_parts(5_003_000, 44457) + // Standard Error: 3_889 + .saturating_add(Weight::from_parts(4_918_195, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -62,10 +62,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 5_969_000 picoseconds. - Weight::from_parts(6_153_000, 44457) - // Standard Error: 82_905 - .saturating_add(Weight::from_parts(23_575_983, 0).saturating_mul(b.into())) + // Minimum execution time: 4_739_000 picoseconds. + Weight::from_parts(4_887_000, 44457) + // Standard Error: 37_951 + .saturating_add(Weight::from_parts(23_410_931, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_999_000 picoseconds. - Weight::from_parts(5_178_000, 3535) + // Minimum execution time: 4_183_000 picoseconds. + Weight::from_parts(4_391_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } } @@ -90,10 +90,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_140_000 picoseconds. - Weight::from_parts(2_807_337, 44457) - // Standard Error: 15_773 - .saturating_add(Weight::from_parts(5_454_582, 0).saturating_mul(b.into())) + // Minimum execution time: 4_862_000 picoseconds. + Weight::from_parts(5_003_000, 44457) + // Standard Error: 3_889 + .saturating_add(Weight::from_parts(4_918_195, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -104,10 +104,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 5_969_000 picoseconds. - Weight::from_parts(6_153_000, 44457) - // Standard Error: 82_905 - .saturating_add(Weight::from_parts(23_575_983, 0).saturating_mul(b.into())) + // Minimum execution time: 4_739_000 picoseconds. + Weight::from_parts(4_887_000, 44457) + // Standard Error: 37_951 + .saturating_add(Weight::from_parts(23_410_931, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -117,8 +117,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_999_000 picoseconds. - Weight::from_parts(5_178_000, 3535) + // Minimum execution time: 4_183_000 picoseconds. + Weight::from_parts(4_391_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } } diff --git a/pallets/configuration/src/weights.rs b/pallets/configuration/src/weights.rs index dba588351a..99e47ed2f7 100644 --- a/pallets/configuration/src/weights.rs +++ b/pallets/configuration/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_configuration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/configuration/src/weights.rs @@ -50,8 +50,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_496_000 picoseconds. - Weight::from_parts(1_575_000, 0) + // Minimum execution time: 990_000 picoseconds. + Weight::from_parts(1_090_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration MinGasPriceOverride (r:0 w:1) @@ -64,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_293_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_469_000 picoseconds. + Weight::from_parts(1_565_000, 0) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) @@ -74,8 +74,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_542_000 picoseconds. - Weight::from_parts(1_633_000, 0) + // Minimum execution time: 1_027_000 picoseconds. + Weight::from_parts(1_098_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) @@ -84,8 +84,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_388_000 picoseconds. - Weight::from_parts(6_639_000, 0) + // Minimum execution time: 4_149_000 picoseconds. + Weight::from_parts(4_326_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) @@ -94,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_781_000 picoseconds. - Weight::from_parts(3_947_000, 0) + // Minimum execution time: 2_758_000 picoseconds. + Weight::from_parts(2_911_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) @@ -104,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_839_000 picoseconds. - Weight::from_parts(4_030_000, 0) + // Minimum execution time: 2_695_000 picoseconds. + Weight::from_parts(2_829_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -118,8 +118,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_496_000 picoseconds. - Weight::from_parts(1_575_000, 0) + // Minimum execution time: 990_000 picoseconds. + Weight::from_parts(1_090_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration MinGasPriceOverride (r:0 w:1) @@ -132,8 +132,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_293_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_469_000 picoseconds. 
+ Weight::from_parts(1_565_000, 0) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) @@ -142,8 +142,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_542_000 picoseconds. - Weight::from_parts(1_633_000, 0) + // Minimum execution time: 1_027_000 picoseconds. + Weight::from_parts(1_098_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) @@ -152,8 +152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_388_000 picoseconds. - Weight::from_parts(6_639_000, 0) + // Minimum execution time: 4_149_000 picoseconds. + Weight::from_parts(4_326_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) @@ -162,8 +162,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_781_000 picoseconds. - Weight::from_parts(3_947_000, 0) + // Minimum execution time: 2_758_000 picoseconds. + Weight::from_parts(2_911_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) @@ -172,8 +172,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_839_000 picoseconds. - Weight::from_parts(4_030_000, 0) + // Minimum execution time: 2_695_000 picoseconds. + Weight::from_parts(2_829_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/evm-migration/src/weights.rs b/pallets/evm-migration/src/weights.rs index 487fbe51be..66f307fb43 100644 --- a/pallets/evm-migration/src/weights.rs +++ b/pallets/evm-migration/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_evm_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/evm-migration/src/weights.rs @@ -53,8 +53,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `94` // Estimated: `3593` - // Minimum execution time: 7_754_000 picoseconds. - Weight::from_parts(7_933_000, 3593) + // Minimum execution time: 6_131_000 picoseconds. + Weight::from_parts(6_351_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -67,10 +67,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 5_694_000 picoseconds. 
- Weight::from_parts(5_971_220, 3494) - // Standard Error: 724 - .saturating_add(Weight::from_parts(802_677, 0).saturating_mul(b.into())) + // Minimum execution time: 4_522_000 picoseconds. + Weight::from_parts(4_569_839, 3494) + // Standard Error: 253 + .saturating_add(Weight::from_parts(743_780, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(b.into()))) } @@ -83,10 +83,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 6_960_000 picoseconds. - Weight::from_parts(7_397_101, 3494) - // Standard Error: 65 - .saturating_add(Weight::from_parts(1_120, 0).saturating_mul(b.into())) + // Minimum execution time: 5_329_000 picoseconds. + Weight::from_parts(5_677_312, 3494) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_369, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -95,20 +95,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_188_000 picoseconds. - Weight::from_parts(2_079_143, 0) - // Standard Error: 634 - .saturating_add(Weight::from_parts(541_830, 0).saturating_mul(b.into())) + // Minimum execution time: 890_000 picoseconds. + Weight::from_parts(1_279_871, 0) + // Standard Error: 112 + .saturating_add(Weight::from_parts(408_968, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_223_000 picoseconds. - Weight::from_parts(2_614_838, 0) - // Standard Error: 878 - .saturating_add(Weight::from_parts(1_212_908, 0).saturating_mul(b.into())) + // Minimum execution time: 896_000 picoseconds. + Weight::from_parts(1_975_680, 0) + // Standard Error: 117 + .saturating_add(Weight::from_parts(1_003_721, 0).saturating_mul(b.into())) } } @@ -124,8 +124,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `94` // Estimated: `3593` - // Minimum execution time: 7_754_000 picoseconds. - Weight::from_parts(7_933_000, 3593) + // Minimum execution time: 6_131_000 picoseconds. + Weight::from_parts(6_351_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -138,10 +138,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 5_694_000 picoseconds. - Weight::from_parts(5_971_220, 3494) - // Standard Error: 724 - .saturating_add(Weight::from_parts(802_677, 0).saturating_mul(b.into())) + // Minimum execution time: 4_522_000 picoseconds. + Weight::from_parts(4_569_839, 3494) + // Standard Error: 253 + .saturating_add(Weight::from_parts(743_780, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(b.into()))) } @@ -154,10 +154,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 6_960_000 picoseconds. - Weight::from_parts(7_397_101, 3494) - // Standard Error: 65 - .saturating_add(Weight::from_parts(1_120, 0).saturating_mul(b.into())) + // Minimum execution time: 5_329_000 picoseconds. 
+ Weight::from_parts(5_677_312, 3494) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_369, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -166,20 +166,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_188_000 picoseconds. - Weight::from_parts(2_079_143, 0) - // Standard Error: 634 - .saturating_add(Weight::from_parts(541_830, 0).saturating_mul(b.into())) + // Minimum execution time: 890_000 picoseconds. + Weight::from_parts(1_279_871, 0) + // Standard Error: 112 + .saturating_add(Weight::from_parts(408_968, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_223_000 picoseconds. - Weight::from_parts(2_614_838, 0) - // Standard Error: 878 - .saturating_add(Weight::from_parts(1_212_908, 0).saturating_mul(b.into())) + // Minimum execution time: 896_000 picoseconds. + Weight::from_parts(1_975_680, 0) + // Standard Error: 117 + .saturating_add(Weight::from_parts(1_003_721, 0).saturating_mul(b.into())) } } diff --git a/pallets/foreign-assets/src/weights.rs b/pallets/foreign-assets/src/weights.rs index 9d4c537618..faaeba0177 100644 --- a/pallets/foreign-assets/src/weights.rs +++ b/pallets/foreign-assets/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_foreign_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/foreign-assets/src/weights.rs @@ -68,8 +68,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `6196` - // Minimum execution time: 44_390_000 picoseconds. - Weight::from_parts(45_078_000, 6196) + // Minimum execution time: 33_294_000 picoseconds. + Weight::from_parts(34_011_000, 6196) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } @@ -81,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `4079` - // Minimum execution time: 12_945_000 picoseconds. - Weight::from_parts(13_629_000, 4079) + // Minimum execution time: 9_296_000 picoseconds. + Weight::from_parts(9_594_000, 4079) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -118,8 +118,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `286` // Estimated: `6196` - // Minimum execution time: 44_390_000 picoseconds. - Weight::from_parts(45_078_000, 6196) + // Minimum execution time: 33_294_000 picoseconds. 
+ Weight::from_parts(34_011_000, 6196) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } @@ -131,8 +131,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `197` // Estimated: `4079` - // Minimum execution time: 12_945_000 picoseconds. - Weight::from_parts(13_629_000, 4079) + // Minimum execution time: 9_296_000 picoseconds. + Weight::from_parts(9_594_000, 4079) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/fungible/src/weights.rs b/pallets/fungible/src/weights.rs index b9e478558f..f3522c4101 100644 --- a/pallets/fungible/src/weights.rs +++ b/pallets/fungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_fungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/fungible/src/weights.rs @@ -55,8 +55,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3542` - // Minimum execution time: 9_344_000 picoseconds. - Weight::from_parts(9_600_000, 3542) + // Minimum execution time: 7_228_000 picoseconds. + Weight::from_parts(7_472_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -69,10 +69,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493 + b * (2552 ±0)` - // Minimum execution time: 2_993_000 picoseconds. - Weight::from_parts(5_240_270, 3493) - // Standard Error: 1_763 - .saturating_add(Weight::from_parts(3_193_198, 0).saturating_mul(b.into())) + // Minimum execution time: 2_398_000 picoseconds. + Weight::from_parts(4_432_908, 3493) + // Standard Error: 263 + .saturating_add(Weight::from_parts(2_617_422, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `3542` - // Minimum execution time: 11_725_000 picoseconds. - Weight::from_parts(12_140_000, 3542) + // Minimum execution time: 9_444_000 picoseconds. + Weight::from_parts(9_742_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 12_552_000 picoseconds. - Weight::from_parts(12_894_000, 6094) + // Minimum execution time: 9_553_000 picoseconds. 
+ Weight::from_parts(9_852_000, 6094) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 10_854_000 picoseconds. - Weight::from_parts(11_125_000, 3542) + // Minimum execution time: 8_435_000 picoseconds. + Weight::from_parts(8_714_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 10_912_000 picoseconds. - Weight::from_parts(11_163_000, 3542) + // Minimum execution time: 8_475_000 picoseconds. + Weight::from_parts(8_735_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 5_283_000 picoseconds. - Weight::from_parts(5_474_000, 3558) + // Minimum execution time: 4_426_000 picoseconds. + Weight::from_parts(4_604_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Fungible Allowance (r:0 w:1) @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_774_000 picoseconds. - Weight::from_parts(5_909_000, 0) + // Minimum execution time: 4_130_000 picoseconds. + Weight::from_parts(4_275_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Fungible Allowance (r:1 w:1) @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3558` - // Minimum execution time: 18_184_000 picoseconds. - Weight::from_parts(18_524_000, 3558) + // Minimum execution time: 14_878_000 picoseconds. + Weight::from_parts(15_263_000, 3558) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -176,8 +176,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3542` - // Minimum execution time: 9_344_000 picoseconds. - Weight::from_parts(9_600_000, 3542) + // Minimum execution time: 7_228_000 picoseconds. + Weight::from_parts(7_472_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -190,10 +190,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493 + b * (2552 ±0)` - // Minimum execution time: 2_993_000 picoseconds. - Weight::from_parts(5_240_270, 3493) - // Standard Error: 1_763 - .saturating_add(Weight::from_parts(3_193_198, 0).saturating_mul(b.into())) + // Minimum execution time: 2_398_000 picoseconds. + Weight::from_parts(4_432_908, 3493) + // Standard Error: 263 + .saturating_add(Weight::from_parts(2_617_422, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -208,8 +208,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `197` // Estimated: `3542` - // Minimum execution time: 11_725_000 picoseconds. - Weight::from_parts(12_140_000, 3542) + // Minimum execution time: 9_444_000 picoseconds. 
+ Weight::from_parts(9_742_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 12_552_000 picoseconds. - Weight::from_parts(12_894_000, 6094) + // Minimum execution time: 9_553_000 picoseconds. + Weight::from_parts(9_852_000, 6094) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -232,8 +232,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 10_854_000 picoseconds. - Weight::from_parts(11_125_000, 3542) + // Minimum execution time: 8_435_000 picoseconds. + Weight::from_parts(8_714_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -245,8 +245,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 10_912_000 picoseconds. - Weight::from_parts(11_163_000, 3542) + // Minimum execution time: 8_475_000 picoseconds. + Weight::from_parts(8_735_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 5_283_000 picoseconds. - Weight::from_parts(5_474_000, 3558) + // Minimum execution time: 4_426_000 picoseconds. + Weight::from_parts(4_604_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Fungible Allowance (r:0 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_774_000 picoseconds. - Weight::from_parts(5_909_000, 0) + // Minimum execution time: 4_130_000 picoseconds. + Weight::from_parts(4_275_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Fungible Allowance (r:1 w:1) @@ -280,8 +280,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3558` - // Minimum execution time: 18_184_000 picoseconds. - Weight::from_parts(18_524_000, 3558) + // Minimum execution time: 14_878_000 picoseconds. + Weight::from_parts(15_263_000, 3558) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/pallets/identity/src/weights.rs b/pallets/identity/src/weights.rs index 483bf012ee..753b6c8e35 100644 --- a/pallets/identity/src/weights.rs +++ b/pallets/identity/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/identity/src/weights.rs @@ -64,10 +64,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_952_000 picoseconds. - Weight::from_parts(9_493_179, 2626) - // Standard Error: 776 - .saturating_add(Weight::from_parts(95_408, 0).saturating_mul(r.into())) + // Minimum execution time: 6_759_000 picoseconds. + Weight::from_parts(7_254_560, 2626) + // Standard Error: 231 + .saturating_add(Weight::from_parts(64_513, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -79,12 +79,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 18_573_000 picoseconds. - Weight::from_parts(16_665_333, 11003) - // Standard Error: 1_927 - .saturating_add(Weight::from_parts(93_600, 0).saturating_mul(r.into())) - // Standard Error: 376 - .saturating_add(Weight::from_parts(134_895, 0).saturating_mul(x.into())) + // Minimum execution time: 14_134_000 picoseconds. + Weight::from_parts(12_591_985, 11003) + // Standard Error: 562 + .saturating_add(Weight::from_parts(77_682, 0).saturating_mul(r.into())) + // Standard Error: 109 + .saturating_add(Weight::from_parts(96_303, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -99,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `100` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 6_669_000 picoseconds. - Weight::from_parts(15_167_928, 11003) - // Standard Error: 1_551 - .saturating_add(Weight::from_parts(1_294_015, 0).saturating_mul(s.into())) + // Minimum execution time: 4_763_000 picoseconds. + Weight::from_parts(11_344_974, 11003) + // Standard Error: 401 + .saturating_add(Weight::from_parts(1_141_028, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -120,10 +120,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 6_642_000 picoseconds. - Weight::from_parts(15_473_100, 11003) - // Standard Error: 1_132 - .saturating_add(Weight::from_parts(592_570, 0).saturating_mul(p.into())) + // Minimum execution time: 4_783_000 picoseconds. + Weight::from_parts(11_531_027, 11003) + // Standard Error: 369 + .saturating_add(Weight::from_parts(542_102, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -141,14 +141,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 29_150_000 picoseconds. 
- Weight::from_parts(20_982_965, 11003) - // Standard Error: 2_448 - .saturating_add(Weight::from_parts(17_611, 0).saturating_mul(r.into())) - // Standard Error: 478 - .saturating_add(Weight::from_parts(582_119, 0).saturating_mul(s.into())) - // Standard Error: 478 - .saturating_add(Weight::from_parts(114_020, 0).saturating_mul(x.into())) + // Minimum execution time: 23_175_000 picoseconds. + Weight::from_parts(16_503_215, 11003) + // Standard Error: 625 + .saturating_add(Weight::from_parts(1_175, 0).saturating_mul(r.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(533_184, 0).saturating_mul(s.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(94_600, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -163,12 +163,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(18_524_805, 11003) - // Standard Error: 1_965 - .saturating_add(Weight::from_parts(74_420, 0).saturating_mul(r.into())) - // Standard Error: 383 - .saturating_add(Weight::from_parts(152_570, 0).saturating_mul(x.into())) + // Minimum execution time: 15_322_000 picoseconds. + Weight::from_parts(13_671_670, 11003) + // Standard Error: 722 + .saturating_add(Weight::from_parts(73_665, 0).saturating_mul(r.into())) + // Standard Error: 140 + .saturating_add(Weight::from_parts(124_598, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,12 +180,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 17_277_000 picoseconds. - Weight::from_parts(16_760_327, 11003) - // Standard Error: 1_967 - .saturating_add(Weight::from_parts(33_499, 0).saturating_mul(r.into())) - // Standard Error: 383 - .saturating_add(Weight::from_parts(148_237, 0).saturating_mul(x.into())) + // Minimum execution time: 13_268_000 picoseconds. + Weight::from_parts(12_489_352, 11003) + // Standard Error: 544 + .saturating_add(Weight::from_parts(35_424, 0).saturating_mul(r.into())) + // Standard Error: 106 + .saturating_add(Weight::from_parts(123_149, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -196,10 +196,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_566_000 picoseconds. - Weight::from_parts(6_982_669, 2626) - // Standard Error: 613 - .saturating_add(Weight::from_parts(77_476, 0).saturating_mul(r.into())) + // Minimum execution time: 4_845_000 picoseconds. + Weight::from_parts(5_147_478, 2626) + // Standard Error: 169 + .saturating_add(Weight::from_parts(55_561, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -210,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 5_856_000 picoseconds. 
- Weight::from_parts(6_185_551, 2626) - // Standard Error: 466 - .saturating_add(Weight::from_parts(76_432, 0).saturating_mul(r.into())) + // Minimum execution time: 4_191_000 picoseconds. + Weight::from_parts(4_478_351, 2626) + // Standard Error: 138 + .saturating_add(Weight::from_parts(53_627, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -224,10 +224,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 5_784_000 picoseconds. - Weight::from_parts(6_029_409, 2626) - // Standard Error: 389 - .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) + // Minimum execution time: 4_003_000 picoseconds. + Weight::from_parts(4_303_365, 2626) + // Standard Error: 147 + .saturating_add(Weight::from_parts(52_472, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,12 +241,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `444 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 15_250_000 picoseconds. - Weight::from_parts(14_264_575, 11003) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(45_342, 0).saturating_mul(r.into())) - // Standard Error: 369 - .saturating_add(Weight::from_parts(216_509, 0).saturating_mul(x.into())) + // Minimum execution time: 11_465_000 picoseconds. + Weight::from_parts(10_326_049, 11003) + // Standard Error: 660 + .saturating_add(Weight::from_parts(48_922, 0).saturating_mul(r.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(185_374, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,14 +265,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 43_178_000 picoseconds. - Weight::from_parts(34_379_402, 11003) - // Standard Error: 2_843 - .saturating_add(Weight::from_parts(58_134, 0).saturating_mul(r.into())) - // Standard Error: 555 - .saturating_add(Weight::from_parts(596_395, 0).saturating_mul(s.into())) - // Standard Error: 555 - .saturating_add(Weight::from_parts(113_292, 0).saturating_mul(x.into())) + // Minimum execution time: 34_933_000 picoseconds. + Weight::from_parts(28_994_022, 11003) + // Standard Error: 668 + .saturating_add(Weight::from_parts(21_722, 0).saturating_mul(r.into())) + // Standard Error: 130 + .saturating_add(Weight::from_parts(540_580, 0).saturating_mul(s.into())) + // Standard Error: 130 + .saturating_add(Weight::from_parts(89_348, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -285,12 +285,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_093_000 picoseconds. - Weight::from_parts(4_184_000, 0) - // Standard Error: 688_514 - .saturating_add(Weight::from_parts(42_213_609, 0).saturating_mul(x.into())) - // Standard Error: 114_549 - .saturating_add(Weight::from_parts(8_812_982, 0).saturating_mul(n.into())) + // Minimum execution time: 2_770_000 picoseconds. 
+ Weight::from_parts(2_875_000, 0) + // Standard Error: 281_295 + .saturating_add(Weight::from_parts(37_513_186, 0).saturating_mul(x.into())) + // Standard Error: 46_799 + .saturating_add(Weight::from_parts(7_949_936, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: Identity SubsOf (r:600 w:0) @@ -303,12 +303,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 3_998_000 picoseconds. - Weight::from_parts(4_142_000, 990) - // Standard Error: 2_992 - .saturating_add(Weight::from_parts(62_923, 0).saturating_mul(x.into())) - // Standard Error: 497 - .saturating_add(Weight::from_parts(1_122_767, 0).saturating_mul(n.into())) + // Minimum execution time: 2_751_000 picoseconds. + Weight::from_parts(2_862_000, 990) + // Standard Error: 953 + .saturating_add(Weight::from_parts(28_947, 0).saturating_mul(x.into())) + // Standard Error: 158 + .saturating_add(Weight::from_parts(994_085, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) @@ -323,12 +323,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 4_019_000 picoseconds. - Weight::from_parts(4_174_000, 990) - // Standard Error: 2_026_537 - .saturating_add(Weight::from_parts(127_217_493, 0).saturating_mul(s.into())) - // Standard Error: 337_157 - .saturating_add(Weight::from_parts(22_199_440, 0).saturating_mul(n.into())) + // Minimum execution time: 2_671_000 picoseconds. + Weight::from_parts(2_814_000, 990) + // Standard Error: 785_159 + .saturating_add(Weight::from_parts(109_659_566, 0).saturating_mul(s.into())) + // Standard Error: 130_628 + .saturating_add(Weight::from_parts(19_169_269, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -345,10 +345,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `474 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 16_361_000 picoseconds. - Weight::from_parts(20_622_408, 11003) - // Standard Error: 592 - .saturating_add(Weight::from_parts(48_502, 0).saturating_mul(s.into())) + // Minimum execution time: 12_571_000 picoseconds. + Weight::from_parts(16_366_301, 11003) + // Standard Error: 217 + .saturating_add(Weight::from_parts(42_542, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -361,10 +361,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 9_676_000 picoseconds. - Weight::from_parts(11_336_454, 11003) - // Standard Error: 240 - .saturating_add(Weight::from_parts(17_924, 0).saturating_mul(s.into())) + // Minimum execution time: 7_278_000 picoseconds. 
+ Weight::from_parts(9_227_799, 11003) + // Standard Error: 104 + .saturating_add(Weight::from_parts(14_014, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -379,10 +379,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 20_304_000 picoseconds. - Weight::from_parts(22_890_354, 11003) - // Standard Error: 1_568 - .saturating_add(Weight::from_parts(40_002, 0).saturating_mul(s.into())) + // Minimum execution time: 15_771_000 picoseconds. + Weight::from_parts(18_105_475, 11003) + // Standard Error: 129 + .saturating_add(Weight::from_parts(32_074, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -397,10 +397,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `703 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 17_645_000 picoseconds. - Weight::from_parts(20_080_248, 6723) - // Standard Error: 1_331 - .saturating_add(Weight::from_parts(42_353, 0).saturating_mul(s.into())) + // Minimum execution time: 14_093_000 picoseconds. + Weight::from_parts(16_125_177, 6723) + // Standard Error: 146 + .saturating_add(Weight::from_parts(39_270, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -415,10 +415,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_952_000 picoseconds. - Weight::from_parts(9_493_179, 2626) - // Standard Error: 776 - .saturating_add(Weight::from_parts(95_408, 0).saturating_mul(r.into())) + // Minimum execution time: 6_759_000 picoseconds. + Weight::from_parts(7_254_560, 2626) + // Standard Error: 231 + .saturating_add(Weight::from_parts(64_513, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -430,12 +430,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 18_573_000 picoseconds. - Weight::from_parts(16_665_333, 11003) - // Standard Error: 1_927 - .saturating_add(Weight::from_parts(93_600, 0).saturating_mul(r.into())) - // Standard Error: 376 - .saturating_add(Weight::from_parts(134_895, 0).saturating_mul(x.into())) + // Minimum execution time: 14_134_000 picoseconds. + Weight::from_parts(12_591_985, 11003) + // Standard Error: 562 + .saturating_add(Weight::from_parts(77_682, 0).saturating_mul(r.into())) + // Standard Error: 109 + .saturating_add(Weight::from_parts(96_303, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -450,10 +450,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `100` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 6_669_000 picoseconds. - Weight::from_parts(15_167_928, 11003) - // Standard Error: 1_551 - .saturating_add(Weight::from_parts(1_294_015, 0).saturating_mul(s.into())) + // Minimum execution time: 4_763_000 picoseconds. 
+ Weight::from_parts(11_344_974, 11003) + // Standard Error: 401 + .saturating_add(Weight::from_parts(1_141_028, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -471,10 +471,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 6_642_000 picoseconds. - Weight::from_parts(15_473_100, 11003) - // Standard Error: 1_132 - .saturating_add(Weight::from_parts(592_570, 0).saturating_mul(p.into())) + // Minimum execution time: 4_783_000 picoseconds. + Weight::from_parts(11_531_027, 11003) + // Standard Error: 369 + .saturating_add(Weight::from_parts(542_102, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -492,14 +492,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 29_150_000 picoseconds. - Weight::from_parts(20_982_965, 11003) - // Standard Error: 2_448 - .saturating_add(Weight::from_parts(17_611, 0).saturating_mul(r.into())) - // Standard Error: 478 - .saturating_add(Weight::from_parts(582_119, 0).saturating_mul(s.into())) - // Standard Error: 478 - .saturating_add(Weight::from_parts(114_020, 0).saturating_mul(x.into())) + // Minimum execution time: 23_175_000 picoseconds. + Weight::from_parts(16_503_215, 11003) + // Standard Error: 625 + .saturating_add(Weight::from_parts(1_175, 0).saturating_mul(r.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(533_184, 0).saturating_mul(s.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(94_600, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -514,12 +514,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(18_524_805, 11003) - // Standard Error: 1_965 - .saturating_add(Weight::from_parts(74_420, 0).saturating_mul(r.into())) - // Standard Error: 383 - .saturating_add(Weight::from_parts(152_570, 0).saturating_mul(x.into())) + // Minimum execution time: 15_322_000 picoseconds. + Weight::from_parts(13_671_670, 11003) + // Standard Error: 722 + .saturating_add(Weight::from_parts(73_665, 0).saturating_mul(r.into())) + // Standard Error: 140 + .saturating_add(Weight::from_parts(124_598, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -531,12 +531,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 17_277_000 picoseconds. - Weight::from_parts(16_760_327, 11003) - // Standard Error: 1_967 - .saturating_add(Weight::from_parts(33_499, 0).saturating_mul(r.into())) - // Standard Error: 383 - .saturating_add(Weight::from_parts(148_237, 0).saturating_mul(x.into())) + // Minimum execution time: 13_268_000 picoseconds. 
+ Weight::from_parts(12_489_352, 11003) + // Standard Error: 544 + .saturating_add(Weight::from_parts(35_424, 0).saturating_mul(r.into())) + // Standard Error: 106 + .saturating_add(Weight::from_parts(123_149, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -547,10 +547,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_566_000 picoseconds. - Weight::from_parts(6_982_669, 2626) - // Standard Error: 613 - .saturating_add(Weight::from_parts(77_476, 0).saturating_mul(r.into())) + // Minimum execution time: 4_845_000 picoseconds. + Weight::from_parts(5_147_478, 2626) + // Standard Error: 169 + .saturating_add(Weight::from_parts(55_561, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -561,10 +561,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 5_856_000 picoseconds. - Weight::from_parts(6_185_551, 2626) - // Standard Error: 466 - .saturating_add(Weight::from_parts(76_432, 0).saturating_mul(r.into())) + // Minimum execution time: 4_191_000 picoseconds. + Weight::from_parts(4_478_351, 2626) + // Standard Error: 138 + .saturating_add(Weight::from_parts(53_627, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -575,10 +575,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `88 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 5_784_000 picoseconds. - Weight::from_parts(6_029_409, 2626) - // Standard Error: 389 - .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) + // Minimum execution time: 4_003_000 picoseconds. + Weight::from_parts(4_303_365, 2626) + // Standard Error: 147 + .saturating_add(Weight::from_parts(52_472, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -592,12 +592,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `444 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 15_250_000 picoseconds. - Weight::from_parts(14_264_575, 11003) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(45_342, 0).saturating_mul(r.into())) - // Standard Error: 369 - .saturating_add(Weight::from_parts(216_509, 0).saturating_mul(x.into())) + // Minimum execution time: 11_465_000 picoseconds. + Weight::from_parts(10_326_049, 11003) + // Standard Error: 660 + .saturating_add(Weight::from_parts(48_922, 0).saturating_mul(r.into())) + // Standard Error: 122 + .saturating_add(Weight::from_parts(185_374, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -616,14 +616,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 43_178_000 picoseconds. 
- Weight::from_parts(34_379_402, 11003) - // Standard Error: 2_843 - .saturating_add(Weight::from_parts(58_134, 0).saturating_mul(r.into())) - // Standard Error: 555 - .saturating_add(Weight::from_parts(596_395, 0).saturating_mul(s.into())) - // Standard Error: 555 - .saturating_add(Weight::from_parts(113_292, 0).saturating_mul(x.into())) + // Minimum execution time: 34_933_000 picoseconds. + Weight::from_parts(28_994_022, 11003) + // Standard Error: 668 + .saturating_add(Weight::from_parts(21_722, 0).saturating_mul(r.into())) + // Standard Error: 130 + .saturating_add(Weight::from_parts(540_580, 0).saturating_mul(s.into())) + // Standard Error: 130 + .saturating_add(Weight::from_parts(89_348, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -636,12 +636,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_093_000 picoseconds. - Weight::from_parts(4_184_000, 0) - // Standard Error: 688_514 - .saturating_add(Weight::from_parts(42_213_609, 0).saturating_mul(x.into())) - // Standard Error: 114_549 - .saturating_add(Weight::from_parts(8_812_982, 0).saturating_mul(n.into())) + // Minimum execution time: 2_770_000 picoseconds. + Weight::from_parts(2_875_000, 0) + // Standard Error: 281_295 + .saturating_add(Weight::from_parts(37_513_186, 0).saturating_mul(x.into())) + // Standard Error: 46_799 + .saturating_add(Weight::from_parts(7_949_936, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: Identity SubsOf (r:600 w:0) @@ -654,12 +654,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 3_998_000 picoseconds. - Weight::from_parts(4_142_000, 990) - // Standard Error: 2_992 - .saturating_add(Weight::from_parts(62_923, 0).saturating_mul(x.into())) - // Standard Error: 497 - .saturating_add(Weight::from_parts(1_122_767, 0).saturating_mul(n.into())) + // Minimum execution time: 2_751_000 picoseconds. + Weight::from_parts(2_862_000, 990) + // Standard Error: 953 + .saturating_add(Weight::from_parts(28_947, 0).saturating_mul(x.into())) + // Standard Error: 158 + .saturating_add(Weight::from_parts(994_085, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) @@ -674,12 +674,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 4_019_000 picoseconds. - Weight::from_parts(4_174_000, 990) - // Standard Error: 2_026_537 - .saturating_add(Weight::from_parts(127_217_493, 0).saturating_mul(s.into())) - // Standard Error: 337_157 - .saturating_add(Weight::from_parts(22_199_440, 0).saturating_mul(n.into())) + // Minimum execution time: 2_671_000 picoseconds. 
+ Weight::from_parts(2_814_000, 990) + // Standard Error: 785_159 + .saturating_add(Weight::from_parts(109_659_566, 0).saturating_mul(s.into())) + // Standard Error: 130_628 + .saturating_add(Weight::from_parts(19_169_269, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -696,10 +696,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `474 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 16_361_000 picoseconds. - Weight::from_parts(20_622_408, 11003) - // Standard Error: 592 - .saturating_add(Weight::from_parts(48_502, 0).saturating_mul(s.into())) + // Minimum execution time: 12_571_000 picoseconds. + Weight::from_parts(16_366_301, 11003) + // Standard Error: 217 + .saturating_add(Weight::from_parts(42_542, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -712,10 +712,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 9_676_000 picoseconds. - Weight::from_parts(11_336_454, 11003) - // Standard Error: 240 - .saturating_add(Weight::from_parts(17_924, 0).saturating_mul(s.into())) + // Minimum execution time: 7_278_000 picoseconds. + Weight::from_parts(9_227_799, 11003) + // Standard Error: 104 + .saturating_add(Weight::from_parts(14_014, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -730,10 +730,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 20_304_000 picoseconds. - Weight::from_parts(22_890_354, 11003) - // Standard Error: 1_568 - .saturating_add(Weight::from_parts(40_002, 0).saturating_mul(s.into())) + // Minimum execution time: 15_771_000 picoseconds. + Weight::from_parts(18_105_475, 11003) + // Standard Error: 129 + .saturating_add(Weight::from_parts(32_074, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -748,10 +748,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `703 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 17_645_000 picoseconds. - Weight::from_parts(20_080_248, 6723) - // Standard Error: 1_331 - .saturating_add(Weight::from_parts(42_353, 0).saturating_mul(s.into())) + // Minimum execution time: 14_093_000 picoseconds. + Weight::from_parts(16_125_177, 6723) + // Standard Error: 146 + .saturating_add(Weight::from_parts(39_270, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/maintenance/src/weights.rs b/pallets/maintenance/src/weights.rs index 889fee5118..526fa8cc12 100644 --- a/pallets/maintenance/src/weights.rs +++ b/pallets/maintenance/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_maintenance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! 
HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/maintenance/src/weights.rs @@ -47,8 +47,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_227_000 picoseconds. - Weight::from_parts(4_445_000, 0) + // Minimum execution time: 3_015_000 picoseconds. + Weight::from_parts(3_184_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Maintenance Enabled (r:0 w:1) @@ -57,8 +57,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_381_000 picoseconds. - Weight::from_parts(4_576_000, 0) + // Minimum execution time: 2_976_000 picoseconds. + Weight::from_parts(3_111_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Preimage StatusFor (r:1 w:0) @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `209` // Estimated: `3674` - // Minimum execution time: 10_106_000 picoseconds. - Weight::from_parts(10_311_000, 3674) + // Minimum execution time: 7_359_000 picoseconds. + Weight::from_parts(7_613_000, 3674) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -83,8 +83,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_227_000 picoseconds. - Weight::from_parts(4_445_000, 0) + // Minimum execution time: 3_015_000 picoseconds. + Weight::from_parts(3_184_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Maintenance Enabled (r:0 w:1) @@ -93,8 +93,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_381_000 picoseconds. - Weight::from_parts(4_576_000, 0) + // Minimum execution time: 2_976_000 picoseconds. + Weight::from_parts(3_111_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Preimage StatusFor (r:1 w:0) @@ -105,8 +105,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `209` // Estimated: `3674` - // Minimum execution time: 10_106_000 picoseconds. - Weight::from_parts(10_311_000, 3674) + // Minimum execution time: 7_359_000 picoseconds. + Weight::from_parts(7_613_000, 3674) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index a98070041c..95961bbb59 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/nonfungible/src/weights.rs @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 14_096_000 picoseconds. - Weight::from_parts(14_490_000, 3530) + // Minimum execution time: 10_723_000 picoseconds. + Weight::from_parts(11_038_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,10 +87,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_157_000 picoseconds. - Weight::from_parts(2_679_830, 3530) - // Standard Error: 3_040 - .saturating_add(Weight::from_parts(4_359_013, 0).saturating_mul(b.into())) + // Minimum execution time: 3_170_000 picoseconds. + Weight::from_parts(2_230_822, 3530) + // Standard Error: 417 + .saturating_add(Weight::from_parts(3_552_754, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -108,10 +108,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 4_271_000 picoseconds. - Weight::from_parts(4_483_000, 3481) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(5_835_967, 0).saturating_mul(b.into())) + // Minimum execution time: 3_234_000 picoseconds. + Weight::from_parts(3_359_000, 3481) + // Standard Error: 598 + .saturating_add(Weight::from_parts(4_872_803, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -136,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 23_512_000 picoseconds. - Weight::from_parts(24_018_000, 3530) + // Minimum execution time: 17_970_000 picoseconds. + Weight::from_parts(18_458_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 29_808_000 picoseconds. - Weight::from_parts(30_221_000, 3530) + // Minimum execution time: 22_995_000 picoseconds. + Weight::from_parts(23_505_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 29_778_000 picoseconds. - Weight::from_parts(30_144_000, 5874) - // Standard Error: 211_038 - .saturating_add(Weight::from_parts(71_816_636, 0).saturating_mul(b.into())) + // Minimum execution time: 22_854_000 picoseconds. 
+ Weight::from_parts(23_291_000, 5874) + // Standard Error: 83_594 + .saturating_add(Weight::from_parts(59_695_924, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -207,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 17_854_000 picoseconds. - Weight::from_parts(18_244_000, 6070) + // Minimum execution time: 13_873_000 picoseconds. + Weight::from_parts(14_318_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 10_545_000 picoseconds. - Weight::from_parts(10_842_000, 3522) + // Minimum execution time: 7_895_000 picoseconds. + Weight::from_parts(8_131_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 10_536_000 picoseconds. - Weight::from_parts(10_811_000, 3522) + // Minimum execution time: 7_931_000 picoseconds. + Weight::from_parts(8_185_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 5_351_000 picoseconds. - Weight::from_parts(5_501_000, 3522) + // Minimum execution time: 4_111_000 picoseconds. + Weight::from_parts(4_280_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 28_847_000 picoseconds. - Weight::from_parts(29_327_000, 3530) + // Minimum execution time: 21_756_000 picoseconds. + Weight::from_parts(22_237_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -278,10 +278,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_006_000 picoseconds. - Weight::from_parts(2_103_000, 20191) - // Standard Error: 46_810 - .saturating_add(Weight::from_parts(11_403_774, 0).saturating_mul(b.into())) + // Minimum execution time: 1_411_000 picoseconds. + Weight::from_parts(1_505_000, 20191) + // Standard Error: 14_327 + .saturating_add(Weight::from_parts(8_366_431, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_038_000 picoseconds. - Weight::from_parts(11_734_155, 36269) - // Standard Error: 13_521 - .saturating_add(Weight::from_parts(5_311_979, 0).saturating_mul(b.into())) + // Minimum execution time: 3_628_000 picoseconds. 
+ Weight::from_parts(1_438_616, 36269) + // Standard Error: 13_799 + .saturating_add(Weight::from_parts(5_486_048, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -312,10 +312,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 1_599_000 picoseconds. - Weight::from_parts(1_659_000, 20191) - // Standard Error: 77_681 - .saturating_add(Weight::from_parts(6_883_549, 0).saturating_mul(b.into())) + // Minimum execution time: 1_016_000 picoseconds. + Weight::from_parts(4_628_460, 20191) + // Standard Error: 23_738 + .saturating_add(Weight::from_parts(5_023_391, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +330,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 4_916_000 picoseconds. - Weight::from_parts(5_018_000, 36269) - // Standard Error: 84_636 - .saturating_add(Weight::from_parts(24_588_007, 0).saturating_mul(b.into())) + // Minimum execution time: 3_576_000 picoseconds. + Weight::from_parts(3_709_000, 36269) + // Standard Error: 36_977 + .saturating_add(Weight::from_parts(23_798_574, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,8 +343,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_667_000 picoseconds. - Weight::from_parts(4_879_000, 3522) + // Minimum execution time: 3_614_000 picoseconds. + Weight::from_parts(3_776_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -353,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_087_000 picoseconds. - Weight::from_parts(6_270_000, 0) + // Minimum execution time: 4_345_000 picoseconds. + Weight::from_parts(4_555_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -363,8 +363,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_563_000 picoseconds. - Weight::from_parts(3_683_000, 3576) + // Minimum execution time: 2_810_000 picoseconds. + Weight::from_parts(2_982_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -373,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 4_005_000 picoseconds. - Weight::from_parts(4_191_000, 36269) + // Minimum execution time: 3_050_000 picoseconds. + Weight::from_parts(3_169_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -394,8 +394,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 14_096_000 picoseconds. - Weight::from_parts(14_490_000, 3530) + // Minimum execution time: 10_723_000 picoseconds. 
+ Weight::from_parts(11_038_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -412,10 +412,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_157_000 picoseconds. - Weight::from_parts(2_679_830, 3530) - // Standard Error: 3_040 - .saturating_add(Weight::from_parts(4_359_013, 0).saturating_mul(b.into())) + // Minimum execution time: 3_170_000 picoseconds. + Weight::from_parts(2_230_822, 3530) + // Standard Error: 417 + .saturating_add(Weight::from_parts(3_552_754, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -433,10 +433,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 4_271_000 picoseconds. - Weight::from_parts(4_483_000, 3481) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(5_835_967, 0).saturating_mul(b.into())) + // Minimum execution time: 3_234_000 picoseconds. + Weight::from_parts(3_359_000, 3481) + // Standard Error: 598 + .saturating_add(Weight::from_parts(4_872_803, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -461,8 +461,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 23_512_000 picoseconds. - Weight::from_parts(24_018_000, 3530) + // Minimum execution time: 17_970_000 picoseconds. + Weight::from_parts(18_458_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -484,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 29_808_000 picoseconds. - Weight::from_parts(30_221_000, 3530) + // Minimum execution time: 22_995_000 picoseconds. + Weight::from_parts(23_505_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -510,10 +510,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 29_778_000 picoseconds. - Weight::from_parts(30_144_000, 5874) - // Standard Error: 211_038 - .saturating_add(Weight::from_parts(71_816_636, 0).saturating_mul(b.into())) + // Minimum execution time: 22_854_000 picoseconds. + Weight::from_parts(23_291_000, 5874) + // Standard Error: 83_594 + .saturating_add(Weight::from_parts(59_695_924, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -532,8 +532,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 17_854_000 picoseconds. - Weight::from_parts(18_244_000, 6070) + // Minimum execution time: 13_873_000 picoseconds. 
+ Weight::from_parts(14_318_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -545,8 +545,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 10_545_000 picoseconds. - Weight::from_parts(10_842_000, 3522) + // Minimum execution time: 7_895_000 picoseconds. + Weight::from_parts(8_131_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -558,8 +558,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 10_536_000 picoseconds. - Weight::from_parts(10_811_000, 3522) + // Minimum execution time: 7_931_000 picoseconds. + Weight::from_parts(8_185_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -569,8 +569,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 5_351_000 picoseconds. - Weight::from_parts(5_501_000, 3522) + // Minimum execution time: 4_111_000 picoseconds. + Weight::from_parts(4_280_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -591,8 +591,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 28_847_000 picoseconds. - Weight::from_parts(29_327_000, 3530) + // Minimum execution time: 21_756_000 picoseconds. + Weight::from_parts(22_237_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -603,10 +603,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_006_000 picoseconds. - Weight::from_parts(2_103_000, 20191) - // Standard Error: 46_810 - .saturating_add(Weight::from_parts(11_403_774, 0).saturating_mul(b.into())) + // Minimum execution time: 1_411_000 picoseconds. + Weight::from_parts(1_505_000, 20191) + // Standard Error: 14_327 + .saturating_add(Weight::from_parts(8_366_431, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -621,10 +621,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 5_038_000 picoseconds. - Weight::from_parts(11_734_155, 36269) - // Standard Error: 13_521 - .saturating_add(Weight::from_parts(5_311_979, 0).saturating_mul(b.into())) + // Minimum execution time: 3_628_000 picoseconds. + Weight::from_parts(1_438_616, 36269) + // Standard Error: 13_799 + .saturating_add(Weight::from_parts(5_486_048, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -637,10 +637,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 1_599_000 picoseconds. - Weight::from_parts(1_659_000, 20191) - // Standard Error: 77_681 - .saturating_add(Weight::from_parts(6_883_549, 0).saturating_mul(b.into())) + // Minimum execution time: 1_016_000 picoseconds. 
+ Weight::from_parts(4_628_460, 20191) + // Standard Error: 23_738 + .saturating_add(Weight::from_parts(5_023_391, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -655,10 +655,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 4_916_000 picoseconds. - Weight::from_parts(5_018_000, 36269) - // Standard Error: 84_636 - .saturating_add(Weight::from_parts(24_588_007, 0).saturating_mul(b.into())) + // Minimum execution time: 3_576_000 picoseconds. + Weight::from_parts(3_709_000, 36269) + // Standard Error: 36_977 + .saturating_add(Weight::from_parts(23_798_574, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -668,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_667_000 picoseconds. - Weight::from_parts(4_879_000, 3522) + // Minimum execution time: 3_614_000 picoseconds. + Weight::from_parts(3_776_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -678,8 +678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_087_000 picoseconds. - Weight::from_parts(6_270_000, 0) + // Minimum execution time: 4_345_000 picoseconds. + Weight::from_parts(4_555_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -688,8 +688,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_563_000 picoseconds. - Weight::from_parts(3_683_000, 3576) + // Minimum execution time: 2_810_000 picoseconds. + Weight::from_parts(2_982_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -698,8 +698,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 4_005_000 picoseconds. - Weight::from_parts(4_191_000, 36269) + // Minimum execution time: 3_050_000 picoseconds. + Weight::from_parts(3_169_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index 52e1f1fa58..a892487c26 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/refungible/src/weights.rs @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(17_009_000, 3530) + // Minimum execution time: 12_168_000 picoseconds. + Weight::from_parts(12_531_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -98,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 3_775_000 picoseconds. - Weight::from_parts(3_879_000, 3530) - // Standard Error: 3_161 - .saturating_add(Weight::from_parts(5_906_642, 0).saturating_mul(b.into())) + // Minimum execution time: 2_583_000 picoseconds. + Weight::from_parts(2_726_000, 3530) + // Standard Error: 1_051 + .saturating_add(Weight::from_parts(4_727_433, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -121,10 +121,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_776_000 picoseconds. - Weight::from_parts(3_877_000, 3481) - // Standard Error: 2_805 - .saturating_add(Weight::from_parts(7_369_476, 0).saturating_mul(b.into())) + // Minimum execution time: 2_565_000 picoseconds. + Weight::from_parts(2_710_000, 3481) + // Standard Error: 862 + .saturating_add(Weight::from_parts(5_951_711, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -146,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 6_070_000 picoseconds. - Weight::from_parts(5_715_254, 3481) - // Standard Error: 3_252 - .saturating_add(Weight::from_parts(5_385_888, 0).saturating_mul(b.into())) + // Minimum execution time: 4_281_000 picoseconds. + Weight::from_parts(3_252_037, 3481) + // Standard Error: 568 + .saturating_add(Weight::from_parts(4_380_356, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -168,8 +168,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 28_877_000 picoseconds. - Weight::from_parts(29_326_000, 8682) + // Minimum execution time: 22_354_000 picoseconds. + Weight::from_parts(22_787_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 27_570_000 picoseconds. 
- Weight::from_parts(28_241_000, 3554) + // Minimum execution time: 21_689_000 picoseconds. + Weight::from_parts(22_199_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -202,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 17_508_000 picoseconds. - Weight::from_parts(17_819_000, 6118) + // Minimum execution time: 13_673_000 picoseconds. + Weight::from_parts(14_067_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 20_442_000 picoseconds. - Weight::from_parts(20_782_000, 6118) + // Minimum execution time: 15_863_000 picoseconds. + Weight::from_parts(16_197_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 23_151_000 picoseconds. - Weight::from_parts(23_518_000, 6118) + // Minimum execution time: 18_123_000 picoseconds. + Weight::from_parts(18_501_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 22_861_000 picoseconds. - Weight::from_parts(23_178_000, 6118) + // Minimum execution time: 17_966_000 picoseconds. + Weight::from_parts(18_305_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 11_395_000 picoseconds. - Weight::from_parts(11_844_000, 3554) + // Minimum execution time: 8_636_000 picoseconds. + Weight::from_parts(8_882_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,8 +279,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 11_977_000 picoseconds. - Weight::from_parts(12_217_000, 3554) + // Minimum execution time: 8_756_000 picoseconds. + Weight::from_parts(8_978_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 25_243_000 picoseconds. - Weight::from_parts(26_153_000, 6118) + // Minimum execution time: 19_417_000 picoseconds. + Weight::from_parts(19_945_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -313,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 28_042_000 picoseconds. - Weight::from_parts(28_499_000, 6118) + // Minimum execution time: 21_425_000 picoseconds. 
+ Weight::from_parts(21_829_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -332,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 30_539_000 picoseconds. - Weight::from_parts(30_914_000, 6118) + // Minimum execution time: 23_854_000 picoseconds. + Weight::from_parts(24_352_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -351,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 30_454_000 picoseconds. - Weight::from_parts(30_892_000, 6118) + // Minimum execution time: 23_775_000 picoseconds. + Weight::from_parts(24_236_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -374,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 35_479_000 picoseconds. - Weight::from_parts(35_928_000, 3570) + // Minimum execution time: 27_885_000 picoseconds. + Weight::from_parts(28_492_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -386,10 +386,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_013_000 picoseconds. - Weight::from_parts(2_086_000, 20191) - // Standard Error: 45_248 - .saturating_add(Weight::from_parts(11_351_115, 0).saturating_mul(b.into())) + // Minimum execution time: 1_299_000 picoseconds. + Weight::from_parts(1_410_000, 20191) + // Standard Error: 14_247 + .saturating_add(Weight::from_parts(8_221_449, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -404,10 +404,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 3_998_000 picoseconds. - Weight::from_parts(10_420_739, 36269) - // Standard Error: 40_334 - .saturating_add(Weight::from_parts(5_989_594, 0).saturating_mul(b.into())) + // Minimum execution time: 2_717_000 picoseconds. + Weight::from_parts(6_076_231, 36269) + // Standard Error: 10_349 + .saturating_add(Weight::from_parts(4_950_943, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -420,10 +420,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 1_541_000 picoseconds. - Weight::from_parts(4_545_643, 20191) - // Standard Error: 33_290 - .saturating_add(Weight::from_parts(5_626_340, 0).saturating_mul(b.into())) + // Minimum execution time: 936_000 picoseconds. + Weight::from_parts(1_015_000, 20191) + // Standard Error: 9_051 + .saturating_add(Weight::from_parts(5_454_224, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -438,10 +438,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 3_768_000 picoseconds. 
- Weight::from_parts(3_906_000, 36269) - // Standard Error: 97_278 - .saturating_add(Weight::from_parts(24_661_474, 0).saturating_mul(b.into())) + // Minimum execution time: 2_707_000 picoseconds. + Weight::from_parts(2_851_000, 36269) + // Standard Error: 36_568 + .saturating_add(Weight::from_parts(23_557_445, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -453,8 +453,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 13_554_000 picoseconds. - Weight::from_parts(13_880_000, 3554) + // Minimum execution time: 10_556_000 picoseconds. + Weight::from_parts(10_904_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -464,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 6_821_000 picoseconds. - Weight::from_parts(7_027_000, 6118) + // Minimum execution time: 4_708_000 picoseconds. + Weight::from_parts(4_974_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -474,8 +474,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_084_000 picoseconds. - Weight::from_parts(6_285_000, 0) + // Minimum execution time: 4_332_000 picoseconds. + Weight::from_parts(4_506_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -484,8 +484,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_938_000 picoseconds. - Weight::from_parts(3_031_000, 3576) + // Minimum execution time: 2_207_000 picoseconds. + Weight::from_parts(2_358_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -494,8 +494,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_892_000 picoseconds. - Weight::from_parts(3_056_000, 36269) + // Minimum execution time: 2_230_000 picoseconds. + Weight::from_parts(2_337_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -517,8 +517,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(17_009_000, 3530) + // Minimum execution time: 12_168_000 picoseconds. + Weight::from_parts(12_531_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -537,10 +537,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 3_775_000 picoseconds. - Weight::from_parts(3_879_000, 3530) - // Standard Error: 3_161 - .saturating_add(Weight::from_parts(5_906_642, 0).saturating_mul(b.into())) + // Minimum execution time: 2_583_000 picoseconds. 
+ Weight::from_parts(2_726_000, 3530) + // Standard Error: 1_051 + .saturating_add(Weight::from_parts(4_727_433, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -560,10 +560,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_776_000 picoseconds. - Weight::from_parts(3_877_000, 3481) - // Standard Error: 2_805 - .saturating_add(Weight::from_parts(7_369_476, 0).saturating_mul(b.into())) + // Minimum execution time: 2_565_000 picoseconds. + Weight::from_parts(2_710_000, 3481) + // Standard Error: 862 + .saturating_add(Weight::from_parts(5_951_711, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -585,10 +585,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 6_070_000 picoseconds. - Weight::from_parts(5_715_254, 3481) - // Standard Error: 3_252 - .saturating_add(Weight::from_parts(5_385_888, 0).saturating_mul(b.into())) + // Minimum execution time: 4_281_000 picoseconds. + Weight::from_parts(3_252_037, 3481) + // Standard Error: 568 + .saturating_add(Weight::from_parts(4_380_356, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -607,8 +607,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 28_877_000 picoseconds. - Weight::from_parts(29_326_000, 8682) + // Minimum execution time: 22_354_000 picoseconds. + Weight::from_parts(22_787_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -628,8 +628,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 27_570_000 picoseconds. - Weight::from_parts(28_241_000, 3554) + // Minimum execution time: 21_689_000 picoseconds. + Weight::from_parts(22_199_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -641,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 17_508_000 picoseconds. - Weight::from_parts(17_819_000, 6118) + // Minimum execution time: 13_673_000 picoseconds. + Weight::from_parts(14_067_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -658,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 20_442_000 picoseconds. - Weight::from_parts(20_782_000, 6118) + // Minimum execution time: 15_863_000 picoseconds. + Weight::from_parts(16_197_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -675,8 +675,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 23_151_000 picoseconds. 
- Weight::from_parts(23_518_000, 6118) + // Minimum execution time: 18_123_000 picoseconds. + Weight::from_parts(18_501_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -692,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 22_861_000 picoseconds. - Weight::from_parts(23_178_000, 6118) + // Minimum execution time: 17_966_000 picoseconds. + Weight::from_parts(18_305_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -705,8 +705,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 11_395_000 picoseconds. - Weight::from_parts(11_844_000, 3554) + // Minimum execution time: 8_636_000 picoseconds. + Weight::from_parts(8_882_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -718,8 +718,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 11_977_000 picoseconds. - Weight::from_parts(12_217_000, 3554) + // Minimum execution time: 8_756_000 picoseconds. + Weight::from_parts(8_978_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -733,8 +733,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 25_243_000 picoseconds. - Weight::from_parts(26_153_000, 6118) + // Minimum execution time: 19_417_000 picoseconds. + Weight::from_parts(19_945_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -752,8 +752,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 28_042_000 picoseconds. - Weight::from_parts(28_499_000, 6118) + // Minimum execution time: 21_425_000 picoseconds. + Weight::from_parts(21_829_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -771,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 30_539_000 picoseconds. - Weight::from_parts(30_914_000, 6118) + // Minimum execution time: 23_854_000 picoseconds. + Weight::from_parts(24_352_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -790,8 +790,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 30_454_000 picoseconds. - Weight::from_parts(30_892_000, 6118) + // Minimum execution time: 23_775_000 picoseconds. + Weight::from_parts(24_236_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -813,8 +813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 35_479_000 picoseconds. - Weight::from_parts(35_928_000, 3570) + // Minimum execution time: 27_885_000 picoseconds. 
+ Weight::from_parts(28_492_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -825,10 +825,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 2_013_000 picoseconds. - Weight::from_parts(2_086_000, 20191) - // Standard Error: 45_248 - .saturating_add(Weight::from_parts(11_351_115, 0).saturating_mul(b.into())) + // Minimum execution time: 1_299_000 picoseconds. + Weight::from_parts(1_410_000, 20191) + // Standard Error: 14_247 + .saturating_add(Weight::from_parts(8_221_449, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -843,10 +843,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 3_998_000 picoseconds. - Weight::from_parts(10_420_739, 36269) - // Standard Error: 40_334 - .saturating_add(Weight::from_parts(5_989_594, 0).saturating_mul(b.into())) + // Minimum execution time: 2_717_000 picoseconds. + Weight::from_parts(6_076_231, 36269) + // Standard Error: 10_349 + .saturating_add(Weight::from_parts(4_950_943, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,10 +859,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `317 + b * (261 ±0)` // Estimated: `20191` - // Minimum execution time: 1_541_000 picoseconds. - Weight::from_parts(4_545_643, 20191) - // Standard Error: 33_290 - .saturating_add(Weight::from_parts(5_626_340, 0).saturating_mul(b.into())) + // Minimum execution time: 936_000 picoseconds. + Weight::from_parts(1_015_000, 20191) + // Standard Error: 9_051 + .saturating_add(Weight::from_parts(5_454_224, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -877,10 +877,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 3_768_000 picoseconds. - Weight::from_parts(3_906_000, 36269) - // Standard Error: 97_278 - .saturating_add(Weight::from_parts(24_661_474, 0).saturating_mul(b.into())) + // Minimum execution time: 2_707_000 picoseconds. + Weight::from_parts(2_851_000, 36269) + // Standard Error: 36_568 + .saturating_add(Weight::from_parts(23_557_445, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -892,8 +892,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 13_554_000 picoseconds. - Weight::from_parts(13_880_000, 3554) + // Minimum execution time: 10_556_000 picoseconds. + Weight::from_parts(10_904_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -903,8 +903,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 6_821_000 picoseconds. - Weight::from_parts(7_027_000, 6118) + // Minimum execution time: 4_708_000 picoseconds. 
+ Weight::from_parts(4_974_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -913,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_084_000 picoseconds. - Weight::from_parts(6_285_000, 0) + // Minimum execution time: 4_332_000 picoseconds. + Weight::from_parts(4_506_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -923,8 +923,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_938_000 picoseconds. - Weight::from_parts(3_031_000, 3576) + // Minimum execution time: 2_207_000 picoseconds. + Weight::from_parts(2_358_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -933,8 +933,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_892_000 picoseconds. - Weight::from_parts(3_056_000, 36269) + // Minimum execution time: 2_230_000 picoseconds. + Weight::from_parts(2_337_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/structure/src/weights.rs b/pallets/structure/src/weights.rs index b0f9e385b6..11fe06285f 100644 --- a/pallets/structure/src/weights.rs +++ b/pallets/structure/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_structure //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/structure/src/weights.rs @@ -47,8 +47,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `667` // Estimated: `4325` - // Minimum execution time: 9_310_000 picoseconds. - Weight::from_parts(9_551_000, 4325) + // Minimum execution time: 7_344_000 picoseconds. + Weight::from_parts(7_578_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -63,8 +63,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `667` // Estimated: `4325` - // Minimum execution time: 9_310_000 picoseconds. - Weight::from_parts(9_551_000, 4325) + // Minimum execution time: 7_344_000 picoseconds. + Weight::from_parts(7_578_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/unique/src/weights.rs b/pallets/unique/src/weights.rs index fcca221500..7eb0553f02 100644 --- a/pallets/unique/src/weights.rs +++ b/pallets/unique/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_unique //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/unique/src/weights.rs @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `245` // Estimated: `6196` - // Minimum execution time: 32_963_000 picoseconds. - Weight::from_parts(33_785_000, 6196) + // Minimum execution time: 26_618_000 picoseconds. + Weight::from_parts(27_287_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -92,8 +92,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1200` // Estimated: `4325` - // Minimum execution time: 46_962_000 picoseconds. - Weight::from_parts(47_997_000, 4325) + // Minimum execution time: 37_428_000 picoseconds. + Weight::from_parts(38_258_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -105,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_657_000 picoseconds. - Weight::from_parts(13_870_000, 4325) + // Minimum execution time: 9_968_000 picoseconds. + Weight::from_parts(10_388_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,8 +118,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 13_162_000 picoseconds. - Weight::from_parts(13_458_000, 4325) + // Minimum execution time: 9_600_000 picoseconds. + Weight::from_parts(9_974_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -129,8 +129,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 12_614_000 picoseconds. - Weight::from_parts(12_968_000, 4325) + // Minimum execution time: 9_185_000 picoseconds. + Weight::from_parts(9_525_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -144,8 +144,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1012` // Estimated: `4325` - // Minimum execution time: 16_794_000 picoseconds. - Weight::from_parts(17_161_000, 4325) + // Minimum execution time: 12_704_000 picoseconds. + Weight::from_parts(13_115_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1107` // Estimated: `4325` - // Minimum execution time: 18_145_000 picoseconds. - Weight::from_parts(18_527_000, 4325) + // Minimum execution time: 14_185_000 picoseconds. 
+ Weight::from_parts(14_492_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -170,8 +170,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 12_649_000 picoseconds. - Weight::from_parts(12_953_000, 4325) + // Minimum execution time: 9_217_000 picoseconds. + Weight::from_parts(9_499_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -181,8 +181,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 12_310_000 picoseconds. - Weight::from_parts(12_578_000, 4325) + // Minimum execution time: 8_993_000 picoseconds. + Weight::from_parts(9_264_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -192,8 +192,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 12_312_000 picoseconds. - Weight::from_parts(12_567_000, 4325) + // Minimum execution time: 8_804_000 picoseconds. + Weight::from_parts(9_302_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -203,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 8_315_000 picoseconds. - Weight::from_parts(8_489_000, 4325) + // Minimum execution time: 5_985_000 picoseconds. + Weight::from_parts(6_155_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -214,8 +214,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_135_000 picoseconds. - Weight::from_parts(13_484_000, 4325) + // Minimum execution time: 9_288_000 picoseconds. + Weight::from_parts(9_608_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -225,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_462_000 picoseconds. - Weight::from_parts(6_664_000, 44457) + // Minimum execution time: 4_904_000 picoseconds. + Weight::from_parts(5_142_000, 44457) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -252,8 +252,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `245` // Estimated: `6196` - // Minimum execution time: 32_963_000 picoseconds. - Weight::from_parts(33_785_000, 6196) + // Minimum execution time: 26_618_000 picoseconds. + Weight::from_parts(27_287_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -275,8 +275,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1200` // Estimated: `4325` - // Minimum execution time: 46_962_000 picoseconds. - Weight::from_parts(47_997_000, 4325) + // Minimum execution time: 37_428_000 picoseconds. 
+ Weight::from_parts(38_258_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -288,8 +288,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_657_000 picoseconds. - Weight::from_parts(13_870_000, 4325) + // Minimum execution time: 9_968_000 picoseconds. + Weight::from_parts(10_388_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -301,8 +301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 13_162_000 picoseconds. - Weight::from_parts(13_458_000, 4325) + // Minimum execution time: 9_600_000 picoseconds. + Weight::from_parts(9_974_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -312,8 +312,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 12_614_000 picoseconds. - Weight::from_parts(12_968_000, 4325) + // Minimum execution time: 9_185_000 picoseconds. + Weight::from_parts(9_525_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -327,8 +327,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1012` // Estimated: `4325` - // Minimum execution time: 16_794_000 picoseconds. - Weight::from_parts(17_161_000, 4325) + // Minimum execution time: 12_704_000 picoseconds. + Weight::from_parts(13_115_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -342,8 +342,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1107` // Estimated: `4325` - // Minimum execution time: 18_145_000 picoseconds. - Weight::from_parts(18_527_000, 4325) + // Minimum execution time: 14_185_000 picoseconds. + Weight::from_parts(14_492_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -353,8 +353,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 12_649_000 picoseconds. - Weight::from_parts(12_953_000, 4325) + // Minimum execution time: 9_217_000 picoseconds. + Weight::from_parts(9_499_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -364,8 +364,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 12_310_000 picoseconds. - Weight::from_parts(12_578_000, 4325) + // Minimum execution time: 8_993_000 picoseconds. + Weight::from_parts(9_264_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -375,8 +375,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 12_312_000 picoseconds. - Weight::from_parts(12_567_000, 4325) + // Minimum execution time: 8_804_000 picoseconds. 
+ Weight::from_parts(9_302_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -386,8 +386,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 8_315_000 picoseconds. - Weight::from_parts(8_489_000, 4325) + // Minimum execution time: 5_985_000 picoseconds. + Weight::from_parts(6_155_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -397,8 +397,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 13_135_000 picoseconds. - Weight::from_parts(13_484_000, 4325) + // Minimum execution time: 9_288_000 picoseconds. + Weight::from_parts(9_608_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -408,8 +408,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 6_462_000 picoseconds. - Weight::from_parts(6_664_000, 44457) + // Minimum execution time: 4_904_000 picoseconds. + Weight::from_parts(5_142_000, 44457) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index 5ae2b3e6f1..b290e82add 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -52,10 +52,10 @@ pub const MAX_COLLATORS: u32 = 10; pub const SESSION_LENGTH: BlockNumber = HOURS; // Targeting 0.1 UNQ per transfer -pub const WEIGHT_TO_FEE_COEFF: u64 = /**/76_902_456_736_428_438/**/; +pub const WEIGHT_TO_FEE_COEFF: u64 = /**/77_300_265_101_007_172/**/; // Targeting 0.15 UNQ per transfer via ETH -pub const MIN_GAS_PRICE: u64 = /**/1_908_931_253_022/**/; +pub const MIN_GAS_PRICE: u64 = /**/1_920_221_209_483/**/; /// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. /// This is used to limit the maximal weight of a single extrinsic. diff --git a/runtime/common/weights/xcm.rs b/runtime/common/weights/xcm.rs index 7d1dd12a73..a9ad8b1ca7 100644 --- a/runtime/common/weights/xcm.rs +++ b/runtime/common/weights/xcm.rs @@ -3,12 +3,12 @@ //! Autogenerated weights for pallet_xcm //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: 80, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-26, STEPS: `50`, REPEAT: 400, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/release/unique-collator +// target/production/unique-collator // benchmark // pallet // --pallet @@ -19,7 +19,7 @@ // * // --template=.maintain/external-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./runtime/common/weights/xcm.rs @@ -49,8 +49,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3743` - // Minimum execution time: 16_743_000 picoseconds. - Weight::from_parts(17_221_000, 3743) + // Minimum execution time: 12_999_000 picoseconds. 
+ Weight::from_parts(13_426_000, 3743) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -60,8 +60,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 13_575_000 picoseconds. - Weight::from_parts(13_972_000, 1489) + // Minimum execution time: 10_299_000 picoseconds. + Weight::from_parts(10_647_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: ParachainInfo ParachainId (r:1 w:0) @@ -70,16 +70,16 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 13_540_000 picoseconds. - Weight::from_parts(13_855_000, 1489) + // Minimum execution time: 10_094_000 picoseconds. + Weight::from_parts(10_464_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_101_000 picoseconds. - Weight::from_parts(5_262_000, 0) + // Minimum execution time: 3_485_000 picoseconds. + Weight::from_parts(3_664_000, 0) } /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) @@ -87,8 +87,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_433_000 picoseconds. - Weight::from_parts(5_609_000, 0) + // Minimum execution time: 3_717_000 picoseconds. + Weight::from_parts(3_866_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) @@ -97,8 +97,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_748_000 picoseconds. - Weight::from_parts(1_870_000, 0) + // Minimum execution time: 1_328_000 picoseconds. + Weight::from_parts(1_400_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) @@ -121,8 +121,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3743` - // Minimum execution time: 20_053_000 picoseconds. - Weight::from_parts(20_382_000, 3743) + // Minimum execution time: 16_057_000 picoseconds. + Weight::from_parts(16_483_000, 3743) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -144,8 +144,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `3926` - // Minimum execution time: 22_404_000 picoseconds. - Weight::from_parts(22_801_000, 3926) + // Minimum execution time: 18_009_000 picoseconds. + Weight::from_parts(18_565_000, 3926) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -155,8 +155,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_839_000 picoseconds. - Weight::from_parts(1_954_000, 0) + // Minimum execution time: 1_378_000 picoseconds. 
+ Weight::from_parts(1_447_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) @@ -165,8 +165,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `196` // Estimated: `11086` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_492_000, 11086) + // Minimum execution time: 10_770_000 picoseconds. + Weight::from_parts(11_090_000, 11086) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -176,8 +176,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `200` // Estimated: `11090` - // Minimum execution time: 14_046_000 picoseconds. - Weight::from_parts(14_424_000, 11090) + // Minimum execution time: 10_760_000 picoseconds. + Weight::from_parts(11_091_000, 11090) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -187,8 +187,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `207` // Estimated: `13572` - // Minimum execution time: 15_314_000 picoseconds. - Weight::from_parts(15_624_000, 13572) + // Minimum execution time: 12_026_000 picoseconds. + Weight::from_parts(12_321_000, 13572) .saturating_add(T::DbWeight::get().reads(5_u64)) } /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) @@ -207,8 +207,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `345` // Estimated: `6285` - // Minimum execution time: 20_624_000 picoseconds. - Weight::from_parts(20_928_000, 6285) + // Minimum execution time: 15_508_000 picoseconds. + Weight::from_parts(15_885_000, 6285) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -218,8 +218,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `8654` - // Minimum execution time: 7_429_000 picoseconds. - Weight::from_parts(7_661_000, 8654) + // Minimum execution time: 5_580_000 picoseconds. + Weight::from_parts(5_753_000, 8654) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) @@ -228,8 +228,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `207` // Estimated: `11097` - // Minimum execution time: 14_558_000 picoseconds. - Weight::from_parts(14_877_000, 11097) + // Minimum execution time: 10_951_000 picoseconds. + Weight::from_parts(11_341_000, 11097) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -249,8 +249,8 @@ impl pallet_xcm::WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `11239` - // Minimum execution time: 25_587_000 picoseconds. - Weight::from_parts(26_111_000, 11239) + // Minimum execution time: 19_990_000 picoseconds. 
+ Weight::from_parts(20_433_000, 11239) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } From 2244ff0d2e833146769070253a41d5f5724e3ec2 Mon Sep 17 00:00:00 2001 From: Konstantin Astakhov Date: Wed, 27 Sep 2023 14:07:00 +0700 Subject: [PATCH 046/143] ci(try-runtime): fix false-positive --- .github/workflows/try-runtime.yml | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/.github/workflows/try-runtime.yml b/.github/workflows/try-runtime.yml index d687154da6..4cbebc2b92 100644 --- a/.github/workflows/try-runtime.yml +++ b/.github/workflows/try-runtime.yml @@ -210,20 +210,10 @@ jobs: return 1 fi else - DOCKER_LOGS=$(do_docker_logs) - while [ $counter_life -lt 3 ] - do - (( counter_life++ )) - if [[ -n ${DOCKER_LOGS} ]]; then - echo "TryRuntime_on_runtime_upgrade executed without errors" - return 0 - else - echo "Container try-runtime not RUNNING" - return 1 - fi - done + (( counter_life++ )) + echo "Container try-runtime not RUNNING" $counter_life "time" + return 1 fi - exit 0 } while ! is_started; do @@ -231,10 +221,10 @@ jobs: sleep 30s counter=$(( $counter - 1 )) echo "Counter: $counter" - if [ "$counter" -gt "0" ]; then + if [ "$counter" -gt "0" ] && [ "$counter_life" -lt "3" ]; then continue else - echo "Counter reached zero, yet upgrade is not finished" + echo "Counter reached zero, yet upgrade is not finished correctly or Container try-runtime is not RUNNING" exit 1 fi done From d2c9363e553de0162a0777b733f008fc30b3d50a Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 27 Sep 2023 10:01:01 +0000 Subject: [PATCH 047/143] tests: performance test for NFT tokens --- tests/package.json | 1 + tests/src/performance.test.ts | 175 ++++++++++++++++++++++++++++++++++ 2 files changed, 176 insertions(+) create mode 100644 tests/src/performance.test.ts diff --git a/tests/package.json b/tests/package.json index 7a5668717b..9c60c386bb 100644 --- a/tests/package.json +++ b/tests/package.json @@ -48,6 +48,7 @@ "testEthFractionalizer": "yarn _test './**/eth/fractionalizer/**/*.*test.ts'", "testEthMarketplace": "yarn _test './**/eth/marketplace/**/*.*test.ts'", "testEthMarket": "yarn _test './**/eth/marketplace-v2/**/*.*test.ts'", + "testPerformance": "yarn _test ./**/performance.*test.ts", "testSub": "yarn _test './**/sub/**/*.*test.ts'", "testSubNesting": "yarn _test './**/sub/nesting/**/*.*test.ts'", "testEvent": "yarn _test ./src/check-event/*.*test.ts", diff --git a/tests/src/performance.test.ts b/tests/src/performance.test.ts new file mode 100644 index 0000000000..1322bfd597 --- /dev/null +++ b/tests/src/performance.test.ts @@ -0,0 +1,175 @@ +// Copyright 2019-2023 Unique Network (Gibraltar) Ltd. +// This file is part of Unique Network. + +// Unique Network is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Unique Network is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Unique Network. If not, see . 
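+
+// This suite measures how many NFT tokens fit into a single `createMultipleItemsEx`
+// batch while the size of one token property grows in 1 KB steps up to 32 KB.
+// It first probes the limit with the unsafe `system.dryRun` RPC and falls back to
+// plain extrinsic submission if that RPC is not exposed by the node.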
+ +import {ApiPromise} from '@polkadot/api'; +import {IKeyringPair} from '@polkadot/types/types'; +import {expect, itSub, usingPlaygrounds} from './util'; +import {ICrossAccountId, IProperty} from './util/playgrounds/types'; +import {UniqueHelper} from './util/playgrounds/unique'; + +describe('Performace tests', () => { + let alice: IKeyringPair; + const MAX_TOKENS_TO_MINT = 200; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + const donor = await privateKey({url: import.meta.url}); + [alice] = await helper.arrange.createAccounts([100_000n], donor); + }); + }); + + itSub('NFT tokens minting', async ({helper}) => { + const propertyKey = 'prop-a'; + const collection = await helper.nft.mintCollection(alice, { + name: 'test properties', + description: 'test properties collection', + tokenPrefix: 'TPC', + tokenPropertyPermissions: [ + {key: propertyKey, permission: {mutable: true, collectionAdmin: true, tokenOwner: true}}, + ], + }); + + + const results = []; + const step = 1_000; + const sizeOfKey = sizeOfEncodedStr(propertyKey); + let currentSize = step; + let startCount = 0; + let minterFunc = tryMintUnsafeRPC; + try { + startCount = await tryMintUnsafeRPC(helper, alice, MAX_TOKENS_TO_MINT, collection.collectionId, {Substrate: alice.address}); + } + catch (e) { + startCount = await tryMintExplicit(helper, alice, MAX_TOKENS_TO_MINT, collection.collectionId, {Substrate: alice.address}); + minterFunc = tryMintExplicit; + } + results.push({propertySize: 0, tokens: startCount}); + + while(currentSize <= 32_000) { + const property = {key: propertyKey, value: 'A'.repeat(currentSize - sizeOfKey - sizeOfInt(currentSize))}; + const maxTokens = Math.ceil(results.map(x => x.tokens).reduce((a, b) => a + b) / results.length); + const tokens = await minterFunc(helper, alice, maxTokens, collection.collectionId, {Substrate: alice.address}, property); + results.push({propertySize: sizeOfProperty(property), tokens}); + currentSize += step; + await helper.wait.newBlocks(2); + } + + expect(results).to.be.deep.equal([ + {propertySize: 0, tokens: 200}, + {propertySize: 1000, tokens: 149}, + {propertySize: 2000, tokens: 149}, + {propertySize: 3000, tokens: 149}, + {propertySize: 4000, tokens: 149}, + {propertySize: 5000, tokens: 149}, + {propertySize: 6000, tokens: 149}, + {propertySize: 7000, tokens: 149}, + {propertySize: 8000, tokens: 149}, + {propertySize: 9000, tokens: 149}, + {propertySize: 10000, tokens: 149}, + {propertySize: 11000, tokens: 149}, + {propertySize: 12000, tokens: 149}, + {propertySize: 13000, tokens: 149}, + {propertySize: 14000, tokens: 149}, + {propertySize: 15000, tokens: 149}, + {propertySize: 16000, tokens: 149}, + {propertySize: 17000, tokens: 149}, + {propertySize: 18000, tokens: 149}, + {propertySize: 19000, tokens: 149}, + {propertySize: 20000, tokens: 149}, + {propertySize: 21000, tokens: 149}, + {propertySize: 22000, tokens: 149}, + {propertySize: 23000, tokens: 149}, + {propertySize: 24000, tokens: 149}, + {propertySize: 25000, tokens: 149}, + {propertySize: 26000, tokens: 149}, + {propertySize: 27000, tokens: 145}, + {propertySize: 28000, tokens: 140}, + {propertySize: 29000, tokens: 135}, + {propertySize: 30000, tokens: 130}, + {propertySize: 31000, tokens: 126}, + {propertySize: 32000, tokens: 122}, + ]); + }); +}); + + +const dryRun = async (api: ApiPromise, signer: IKeyringPair, tx: any) => { + const signed = await tx.signAsync(signer); + const dryRun = await api.rpc.system.dryRun(signed.toHex()); + return dryRun.isOk && dryRun.asOk.isOk; 
+}; + +const getTokens = (tokensCount: number, owner: ICrossAccountId, property?: IProperty) => (new Array(tokensCount)).fill(0).map(() => { + const token = {owner} as {owner: ICrossAccountId, properties?: IProperty[]}; + if(property) token.properties = [property]; + return token; +}); + +const tryMintUnsafeRPC = async (helper: UniqueHelper, signer: IKeyringPair, tokensCount: number, collectionId: number, owner: ICrossAccountId, property?: IProperty): Promise => { + if(tokensCount < 10) console.log('try mint', tokensCount, 'tokens'); + const tokens = getTokens(tokensCount, owner, property); + const tx = helper.constructApiCall('api.tx.unique.createMultipleItemsEx', [collectionId, {NFT: tokens}]); + if(!(await dryRun(helper.getApi(), signer, tx))) { + if(tokensCount < 2) return 0; + return await tryMintUnsafeRPC(helper, signer, tokensCount - 1, collectionId, owner, property); + } + await helper.executeExtrinsic(signer, 'api.tx.unique.createMultipleItemsEx', [collectionId, {NFT: tokens}]); + return tokensCount; +}; + +const tryMintExplicit = async (helper: UniqueHelper, signer: IKeyringPair, tokensCount: number, collectionId: number, owner: ICrossAccountId, property?: IProperty): Promise => { + const tokens = getTokens(tokensCount, owner, property); + try { + await helper.executeExtrinsic(signer, 'api.tx.unique.createMultipleItemsEx', [collectionId, {NFT: tokens}]); + } + catch (e) { + if(tokensCount < 2) return 0; + return await tryMintExplicit(helper, signer, tokensCount - 1, collectionId, owner, property); + } + return tokensCount; +}; + + +function sizeOfByteProperty(prop: IProperty) { + return sizeOfEncodedBytes(prop.key) + sizeOfEncodedBytes(prop.value!); +} + +function sizeOfProperty(prop: IProperty) { + return sizeOfEncodedStr(prop.key) + sizeOfEncodedStr(prop.value!); +} + +function sizeOfInt(i: number) { + if(i < 0 || i > 0xffffffff) throw new Error('out of range'); + if(i < 0b11_1111) { + return 1; + } else if(i < 0b11_1111_1111_1111) { + return 2; + } else if(i < 0b11_1111_1111_1111_1111_1111_1111_1111) { + return 4; + } else { + return 5; + } +} + +const UTF8_ENCODER = new TextEncoder(); +function sizeOfEncodedStr(v: string) { + const encoded = UTF8_ENCODER.encode(v); + return sizeOfInt(encoded.length) + encoded.length; +} + +function sizeOfEncodedBytes(bytes: Uint8Array | string) { + return sizeOfInt(bytes.length) + bytes.length; +} From 355fa3f6ae5c80bc22b8a4a7111844f03680cc4a Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 25 Sep 2023 06:51:43 +0000 Subject: [PATCH 048/143] feat(test xcm): added generic `low level` xcm tests --- tests/package.json | 3 +- tests/src/util/playgrounds/unique.dev.ts | 17 +- tests/src/xcm/lowLevelXcmUnique.test.ts | 775 +++++++++++++++++++++++ tests/src/xcm/xcmUnique.test.ts | 186 ++---- 4 files changed, 856 insertions(+), 125 deletions(-) create mode 100644 tests/src/xcm/lowLevelXcmUnique.test.ts diff --git a/tests/package.json b/tests/package.json index 7a5668717b..bec5eddffc 100644 --- a/tests/package.json +++ b/tests/package.json @@ -112,6 +112,7 @@ "testCollators": "RUN_COLLATOR_TESTS=1 yarn _test ./**/collator-selection/**.*test.ts --timeout 49999999", "testCollatorSelection": "RUN_COLLATOR_TESTS=1 yarn _test ./**/collatorSelection.*test.ts --timeout 49999999", "testIdentity": "RUN_COLLATOR_TESTS=1 yarn _test ./**/identity.*test.ts --timeout 49999999", + "testLowLevelXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/lowLevelXcmUnique.test.ts", "testXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmUnique.test.ts", "testXcmQuartz": 
"RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmQuartz.test.ts", "testXcmOpal": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmOpal.test.ts", @@ -154,4 +155,4 @@ }, "type": "module", "packageManager": "yarn@3.6.1" -} +} \ No newline at end of file diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index ee097ff13f..499d2e8935 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -458,9 +458,9 @@ export class DevUniqueHelper extends UniqueHelper { session: SessionGroup; testUtils: TestUtilGroup; foreignAssets: ForeignAssetsGroup; - xcm: XcmGroup; - xTokens: XTokensGroup; - tokens: TokensGroup; + xcm: XcmGroup; + xTokens: XTokensGroup; + tokens: TokensGroup; scheduler: SchedulerGroup; collatorSelection: CollatorSelectionGroup; council: ICollectiveGroup; @@ -627,10 +627,10 @@ export class DevAcalaHelper extends AcalaHelper { super(logger, options); this.wait = new WaitGroup(this); } - getSudo() { + getSudo() { // eslint-disable-next-line @typescript-eslint/naming-convention const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; + return this.clone(SudoHelperType) as DevAcalaHelper; } } @@ -643,10 +643,10 @@ export class DevPolkadexHelper extends PolkadexHelper { this.wait = new WaitGroup(this); } - getSudo() { + getSudo() { // eslint-disable-next-line @typescript-eslint/naming-convention const SudoHelperType = SudoHelper(this.helperBase); - return this.clone(SudoHelperType) as T; + return this.clone(SudoHelperType) as DevPolkadexHelper; } } @@ -1290,7 +1290,8 @@ class WaitGroup { } else if(maxBlocksToWait > 0) { maxBlocksToWait--; } else { - this.helper.logger.log(`Eligible event \`${eventIdStr}\` is NOT found`); + this.helper.logger.log(`Eligible event \`${eventIdStr}\` is NOT found. + The wait lasted until block ${blockNumber} inclusive`); unsubscribe(); resolve(null); } diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts new file mode 100644 index 0000000000..4446f5f986 --- /dev/null +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -0,0 +1,775 @@ +// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. +// This file is part of Unique Network. + +// Unique Network is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Unique Network is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Unique Network. If not, see . 
+ +import {IKeyringPair} from '@polkadot/types/types'; +import config from '../config'; +import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; +import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import {nToBigInt} from '@polkadot/util'; +import {hexToString} from '@polkadot/util'; + +const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); +const ACALA_CHAIN = +(process.env.RELAY_ACALA_ID || 2000); +const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); +const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); +const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); + + + +const acalaUrl = config.acalaUrl; +const moonbeamUrl = config.moonbeamUrl; +const astarUrl = config.astarUrl; +const polkadexUrl = config.polkadexUrl; + +const ASTAR_DECIMALS = 18n; +const UNQ_DECIMALS = 18n; + +const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; +const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; +const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; +const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; +const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; + +const SAFE_XCM_VERSION = 2; +const maxWaitBlocks = 6; + +const uniqueMultilocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, +}; + +let balanceUniqueTokenInit: bigint; +let balanceUniqueTokenMiddle: bigint; +let balanceUniqueTokenFinal: bigint; +let unqFees: bigint; + +const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isFailedToTransactAsset); +}; +const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isUntrustedReserveLocation); +}; + +const NETWORKS = { + acala: usingAcalaPlaygrounds, + astar: usingAstarPlaygrounds, + polkadex: usingPolkadexPlaygrounds, + moonbeam: usingMoonbeamPlaygrounds, +} as const; + +function mapToChainId(networkName: keyof typeof NETWORKS) { + switch (networkName) { + case 'acala': + return ACALA_CHAIN; + case 'astar': + return ASTAR_CHAIN; + case 'moonbeam': + return MOONBEAM_CHAIN; + case 'polkadex': + return POLKADEX_CHAIN; + } +} + +function mapToChainUrl(networkName: keyof typeof NETWORKS): string { + switch (networkName) { + case 'acala': + return acalaUrl; + case 'astar': + return astarUrl; + case 'moonbeam': + return moonbeamUrl; + case 'polkadex': + return polkadexUrl; + } +} + +function getDevPlayground(name: T) { + return NETWORKS[name]; +} + + +async function genericSendUnqTo( + networkName: keyof typeof NETWORKS, + randomAccount: IKeyringPair, + randomAccountOnTargetChain = randomAccount, +) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); + await usingPlaygrounds(async (helper) => { + balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); + const destination = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: mapToChainId(networkName), + }, + }, + }, + }; + + const beneficiary = { + V2: { + parents: 0, + interior: { + X1: ( + networkName == 'moonbeam' ? 
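+            // Moonbeam uses 20-byte Ethereum-style accounts, so the beneficiary is an
+            // AccountKey20 junction; the Substrate-based chains take an AccountId32.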
+ { + AccountKey20: { + network: 'Any', + key: randomAccountOnTargetChain.address, + }, + } + : + { + AccountId32: { + network: 'Any', + id: randomAccountOnTargetChain.addressRaw, + }, + } + ), + }, + }, + }; + + const assets = { + V2: [ + { + id: { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + fun: { + Fungible: TRANSFER_AMOUNT, + }, + }, + ], + }; + const feeAssetItem = 0; + + await helper.xcm.limitedReserveTransferAssets(randomAccount, destination, beneficiary, assets, feeAssetItem, 'Unlimited'); + const messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + balanceUniqueTokenMiddle = await helper.balance.getSubstrate(randomAccount.address); + + unqFees = balanceUniqueTokenInit - balanceUniqueTokenMiddle - TRANSFER_AMOUNT; + console.log('[Unique -> %s] transaction fees on Unique: %s UNQ', networkName, helper.util.bigIntToDecimals(unqFees)); + expect(unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; + + await targetPlayground(networkUrl, async (helper) => { + /* + Since only the parachain part of the Polkadex + infrastructure is launched (without their + solochain validators), processing incoming + assets will lead to an error. + This error indicates that the Polkadex chain + received a message from the Unique network, + since the hash is being checked to ensure + it matches what was sent. + */ + if(networkName == 'polkadex') { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash); + } else { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == messageSent.messageHash); + } + }); + + }); +} + +async function genericSendUnqBack( + networkName: keyof typeof NETWORKS, + sudoer: IKeyringPair, + randomAccountOnUnq: IKeyringPair, +) { + const networkUrl = mapToChainUrl(networkName); + + const targetPlayground = getDevPlayground(networkName); + await usingPlaygrounds(async (helper) => { + + const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + randomAccountOnUnq.addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: {Parachain: UNIQUE_CHAIN}, + }, + }, + }, + SENDBACK_AMOUNT, + ); + + let xcmProgramSent: any; + + + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, xcmProgram); + xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, xcmProgram]); + // Needed to bypass the call filter. 
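+        // (the send is wrapped in utility.batch below so that the fast-democracy
+        // proposal dispatches a call which Moonbeam's call filter lets through)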
+ const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal('sending MoonBeam -> Unique via XCM program', batchCall); + xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == xcmProgramSent.messageHash); + + balanceUniqueTokenFinal = await helper.balance.getSubstrate(randomAccountOnUnq.address); + + expect(balanceUniqueTokenFinal).to.be.equal(balanceUniqueTokenInit - unqFees - STAYED_ON_TARGET_CHAIN); + + }); +} + +async function genericSendOnlyOwnedBalance( + networkName: keyof typeof NETWORKS, + sudoer: IKeyringPair, +) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); + + const targetChainBalance = 10000n * (10n ** UNQ_DECIMALS); + + await usingPlaygrounds(async (helper) => { + const targetChainSovereignAccount = helper.address.paraSiblingSovereignAccount(mapToChainId(networkName)); + await helper.getSudo().balance.setBalanceSubstrate(sudoer, targetChainSovereignAccount, targetChainBalance); + const moreThanTargetChainHas = 2n * targetChainBalance; + + const targetAccount = helper.arrange.createEmptyAccount(); + + const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + moreThanTargetChainHas, + ); + + let maliciousXcmProgramSent: any; + + + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgram); + maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgram]); + // Needed to bypass the call filter. 
+ const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal('sending MoonBeam -> Unique via XCM program', batchCall); + maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await expectFailedToTransact(helper, maliciousXcmProgramSent); + + const targetAccountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(targetAccountBalance).to.be.equal(0n); + }); +} + +async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { + const networkUrl = mapToChainUrl(netwokrName); + const targetPlayground = getDevPlayground(netwokrName); + + await usingPlaygrounds(async (helper) => { + const testAmount = 10_000n * (10n ** UNQ_DECIMALS); + const targetAccount = helper.arrange.createEmptyAccount(); + + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }, + testAmount, + ); + + const maliciousXcmProgramHereId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + testAmount, + ); + + let maliciousXcmProgramFullIdSent: any; + let maliciousXcmProgramHereIdSent: any; + const maxWaitBlocks = 3; + + // Try to trick Unique using full UNQ identification + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgramFullId); + maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + // Moonbeam case + else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramFullId]); + + // Needed to bypass the call filter. + const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal('try to act like a reserve location for UNQ using path asset identification', batchCall); + + maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramFullIdSent); + + let accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + + // Try to trick Unique using shortened UNQ identification + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgramHereId); + maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramHereId]); + + // Needed to bypass the call filter. 
+ const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal('try to act like a reserve location for UNQ using "here" asset identification', batchCall); + + maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramHereIdSent); + + accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + }); +} + +async function genericRejectNativeToknsFrom(netwokrName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { + const networkUrl = mapToChainUrl(netwokrName); + const targetPlayground = getDevPlayground(netwokrName); + let messageSent: any; + + await usingPlaygrounds(async (helper) => { + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + helper.arrange.createEmptyAccount().addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: mapToChainId(netwokrName), + }, + }, + }, + }, + TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT, + ); + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueMultilocation, maliciousXcmProgramFullId); + messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramFullId]); + + // Needed to bypass the call filter. + const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal('sending native tokens to the Unique via fast democracy', batchCall); + + messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await expectFailedToTransact(helper, messageSent); + }); +} + + +describeXCM('[XCMLL] Integration test: Exchanging tokens with Acala', () => { + let alice: IKeyringPair; + let randomAccount: IKeyringPair; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + console.log(config.acalaUrl); + randomAccount = helper.arrange.createEmptyAccount(); + + // Set the default version to wrap the first message to other chains. 
+ await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingAcalaPlaygrounds(acalaUrl, async (helper) => { + const destination = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }; + + const metadata = { + name: 'Unique Network', + symbol: 'UNQ', + decimals: 18, + minimalBalance: 1250_000_000_000_000_000n, + }; + const assets = (await (helper.callRpc('api.query.assetRegistry.assetMetadatas.entries'))).map(([_k, v] : [any, any]) => + hexToString(v.toJSON()['symbol'])) as string[]; + + if(!assets.includes('UNQ')) { + await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + } else { + console.log('UNQ token already registered on Acala assetRegistry pallet'); + } + await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); + }); + + await usingPlaygrounds(async (helper) => { + await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); + balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); + }); + }); + + itSub('Should connect and send UNQ to Acala', async () => { + await genericSendUnqTo('acala', randomAccount); + }); + + itSub('Should connect to Acala and send UNQ back', async () => { + await genericSendUnqBack('acala', alice, randomAccount); + }); + + itSub('Acala can send only up to its balance', async () => { + await genericSendOnlyOwnedBalance('acala', alice); + }); + + itSub('Should not accept reserve transfer of UNQ from Acala', async () => { + await genericReserveTransferUNQfrom('acala', alice); + }); +}); + +describeXCM('[XCMLL] Integration test: Exchanging tokens with Polkadex', () => { + let alice: IKeyringPair; + let randomAccount: IKeyringPair; + + const uniqueAssetId = { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + randomAccount = helper.arrange.createEmptyAccount(); + + // Set the default version to wrap the first message to other chains. + await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { + const isWhitelisted = ((await helper.callRpc('api.query.xcmHelper.whitelistedTokens', [])) + .toJSON() as []) + .map(nToBigInt).length != 0; + /* + Check whether the Unique token has been added + to the whitelist, since an error will occur + if it is added again. Needed for debugging + when this test is run multiple times. 
+      */
+      if(!isWhitelisted) {
+        await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId);
+      }
+
+      await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n);
+    });
+
+    await usingPlaygrounds(async (helper) => {
+      await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET);
+      balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address);
+    });
+  });
+
+  itSub('Should connect and send UNQ to Polkadex', async () => {
+    await genericSendUnqTo('polkadex', randomAccount);
+  });
+
+
+  itSub('Should connect to Polkadex and send UNQ back', async () => {
+    await genericSendUnqBack('polkadex', alice, randomAccount);
+  });
+
+  itSub('Polkadex can send only up to its balance', async () => {
+    await genericSendOnlyOwnedBalance('polkadex', alice);
+  });
+
+  itSub('Should not accept reserve transfer of UNQ from Polkadex', async () => {
+    await genericReserveTransferUNQfrom('polkadex', alice);
+  });
+});
+
+// These tests are relevant only when
+// the corresponding foreign assets are not registered
+describeXCM('[XCMLL] Integration test: Unique rejects non-native tokens', () => {
+  let alice: IKeyringPair;
+
+  before(async () => {
+    await usingPlaygrounds(async (helper, privateKey) => {
+      alice = await privateKey('//Alice');
+
+      // Set the default version to wrap the first message to other chains.
+      await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION);
+    });
+  });
+
+  itSub('Unique rejects ACA tokens from Acala', async () => {
+    await genericRejectNativeToknsFrom('acala', alice);
+  });
+
+  itSub('Unique rejects GLMR tokens from Moonbeam', async () => {
+    await genericRejectNativeToknsFrom('moonbeam', alice);
+  });
+
+  itSub('Unique rejects ASTR tokens from Astar', async () => {
+    await genericRejectNativeToknsFrom('astar', alice);
+  });
+
+  itSub('Unique rejects PDX tokens from Polkadex', async () => {
+    await genericRejectNativeToknsFrom('polkadex', alice);
+  });
+});
+
+describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => {
+  // Unique constants
+  let alice: IKeyringPair;
+  let uniqueAssetLocation;
+
+  let randomAccountUnique: IKeyringPair;
+  let randomAccountMoonbeam: IKeyringPair;
+
+  // Moonbeam constants
+  let assetId: string;
+
+  const uniqueAssetMetadata = {
+    name: 'xcUnique',
+    symbol: 'xcUNQ',
+    decimals: 18,
+    isFrozen: false,
+    minimalBalance: 1n,
+  };
+
+
+  before(async () => {
+    await usingPlaygrounds(async (helper, privateKey) => {
+      alice = await privateKey('//Alice');
+      [randomAccountUnique] = await helper.arrange.createAccounts([0n], alice);
+
+
+      // Set the default version to wrap the first message to other chains.
+ await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { + const alithAccount = helper.account.alithAccount(); + const baltatharAccount = helper.account.baltatharAccount(); + const dorothyAccount = helper.account.dorothyAccount(); + + randomAccountMoonbeam = helper.account.create(); + + // >>> Sponsoring Dorothy >>> + console.log('Sponsoring Dorothy.......'); + await helper.balance.transferToEthereum(alithAccount, dorothyAccount.address, 11_000_000_000_000_000_000n); + console.log('Sponsoring Dorothy.......DONE'); + // <<< Sponsoring Dorothy <<< + uniqueAssetLocation = { + XCM: { + parents: 1, + interior: {X1: {Parachain: UNIQUE_CHAIN}}, + }, + }; + const existentialDeposit = 1n; + const isSufficient = true; + const unitsPerSecond = 1n; + const numAssetsWeightHint = 0; + + if((await helper.assetManager.assetTypeId(uniqueAssetLocation)).toJSON()) { + console.log('Unique asset already registered'); + } else { + const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ + location: uniqueAssetLocation, + metadata: uniqueAssetMetadata, + existentialDeposit, + isSufficient, + unitsPerSecond, + numAssetsWeightHint, + }); + + console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); + + await helper.fastDemocracy.executeProposal('register UNQ foreign asset', encodedProposal); + + // >>> Acquire Unique AssetId Info on Moonbeam >>> + console.log('Acquire Unique AssetId Info on Moonbeam.......'); + + assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); + + console.log('UNQ asset ID is %s', assetId); + console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); + } + // >>> Acquire Unique AssetId Info on Moonbeam >>> + + // >>> Sponsoring random Account >>> + console.log('Sponsoring random Account.......'); + await helper.balance.transferToEthereum(baltatharAccount, randomAccountMoonbeam.address, 11_000_000_000_000_000_000n); + console.log('Sponsoring random Account.......DONE'); + // <<< Sponsoring random Account <<< + }); + + await usingPlaygrounds(async (helper) => { + await helper.balance.transferToSubstrate(alice, randomAccountUnique.address, SENDER_BUDGET); + balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccountUnique.address); + }); + }); + + itSub('Should connect and send UNQ to Moonbeam', async () => { + await genericSendUnqTo('moonbeam', randomAccountUnique, randomAccountMoonbeam); + }); + + itSub('Should connect to Moonbeam and send UNQ back', async () => { + await genericSendUnqBack('moonbeam', alice, randomAccountUnique); + }); + + itSub('Moonbeam can send only up to its balance', async () => { + await genericSendOnlyOwnedBalance('moonbeam', alice); + }); + + itSub('Should not accept reserve transfer of UNQ from Moonbeam', async () => { + await genericReserveTransferUNQfrom('moonbeam', alice); + }); +}); + +describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { + let alice: IKeyringPair; + let randomAccount: IKeyringPair; + + const UNQ_ASSET_ID_ON_ASTAR = 1; + const UNQ_MINIMAL_BALANCE_ON_ASTAR = 1n; + + // Unique -> Astar + const astarInitialBalance = 1n * (10n ** ASTAR_DECIMALS); // 1 ASTR, existential deposit required to actually create the account on Astar. + const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? 
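// A back-of-the-envelope sketch of what `unitsPerSecond` means for the fee,
// assuming Astar charges XCM execution like the standard fixed-rate trader
// (fee = unitsPerSecond * weight / 10^12 ref-time per second). The program
// weight used in the example is an assumed placeholder, not a chain value.
const WEIGHT_REF_TIME_PER_SECOND = 1_000_000_000_000n;

function estimateXcmExecutionFee(unitsPerSecond: bigint, weightRefTime: bigint): bigint {
  // Integer division, mirroring on-chain arithmetic.
  return unitsPerSecond * weightRefTime / WEIGHT_REF_TIME_PER_SECOND;
}

// e.g. a ~4 * 10^9 ref-time XCM program would cost
// 228_000_000_000n * 4_000_000_000n / 10^12 = 912_000_000
// in the smallest UNQ unit (~9.1e-10 UNQ at 18 decimals).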
+ + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + randomAccount = helper.arrange.createEmptyAccount(); + await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); + console.log('randomAccount', randomAccount.address); + + // Set the default version to wrap the first message to other chains. + await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingAstarPlaygrounds(astarUrl, async (helper) => { + if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { + console.log('1. Create foreign asset and metadata'); + // TODO update metadata with values from production + await helper.assets.create( + alice, + UNQ_ASSET_ID_ON_ASTAR, + alice.address, + UNQ_MINIMAL_BALANCE_ON_ASTAR, + ); + + await helper.assets.setMetadata( + alice, + UNQ_ASSET_ID_ON_ASTAR, + 'Cross chain UNQ', + 'xcUNQ', + Number(UNQ_DECIMALS), + ); + + console.log('2. Register asset location on Astar'); + const assetLocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, + }; + + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, UNQ_ASSET_ID_ON_ASTAR]); + + console.log('3. Set UNQ payment for XCM execution on Astar'); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } + console.log('4. Transfer 1 ASTR to recipient to create the account (needed due to existential balance)'); + await helper.balance.transferToSubstrate(alice, randomAccount.address, astarInitialBalance); + }); + }); + + itSub('Should connect and send UNQ to Astar', async () => { + await genericSendUnqTo('astar', randomAccount); + }); + + itSub('Should connect to Astar and send UNQ back', async () => { + await genericSendUnqBack('astar', alice, randomAccount); + }); + + itSub('Astar can send only up to its balance', async () => { + await genericSendOnlyOwnedBalance('astar', alice); + }); + + itSub('Should not accept reserve transfer of UNQ from Astar', async () => { + await genericReserveTransferUNQfrom('astar', alice); + }); +}); diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 90e3baf299..9ee60022d5 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -18,7 +18,7 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingRelayPlaygrounds, usingMoonbeamPlaygrounds, usingStatemintPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; -import {nToBigInt} from '@polkadot/util'; +import {hexToString, nToBigInt} from '@polkadot/util'; const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); @@ -57,6 +57,18 @@ const USDT_ASSET_AMOUNT = 10_000_000_000_000_000_000_000_000n; const SAFE_XCM_VERSION = 2; const maxWaitBlocks = 6; + +const uniqueMultilocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, + }, +}; + const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash && event.outcome.isFailedToTransactAsset); 
@@ -509,11 +521,15 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { decimals: 18, minimalBalance: 1250000000000000000n, }; + const assets = (await (helper.callRpc('api.query.assetRegistry.assetMetadatas.entries'))).map(([_k, v] : [any, any]) => + hexToString(v.toJSON()['symbol'])) as string[]; - await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + if(!assets.includes('UNQ')) { + await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + } else { + console.log('UNQ token already registered on Acala assetRegistry pallet'); + } await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); - balanceAcalaTokenInit = await helper.balance.getSubstrate(randomAccount.address); - balanceUniqueForeignTokenInit = await helper.tokens.accounts(randomAccount.address, {ForeignAsset: 0}); }); await usingPlaygrounds(async (helper) => { @@ -670,15 +686,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { let targetAccountBalance = 0n; const [targetAccount] = await helper.arrange.createAccounts([targetAccountBalance], alice); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }; - const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( targetAccount.addressRaw, { @@ -925,15 +932,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { itSub('Should connect to Polkadex and send UNQ back', async ({helper}) => { - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }; - const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( randomAccount.addressRaw, { @@ -971,15 +969,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { const targetAccount = helper.arrange.createEmptyAccount(); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }; - const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( targetAccount.addressRaw, { @@ -1304,26 +1293,30 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { const unitsPerSecond = 1n; const numAssetsWeightHint = 0; - const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ - location: uniqueAssetLocation, - metadata: uniqueAssetMetadata, - existentialDeposit, - isSufficient, - unitsPerSecond, - numAssetsWeightHint, - }); + if((await helper.assetManager.assetTypeId(uniqueAssetLocation)).toJSON) { + console.log('Unique asset is already registered on MoonBeam'); + } else { + const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ + location: uniqueAssetLocation, + metadata: uniqueAssetMetadata, + existentialDeposit, + isSufficient, + unitsPerSecond, + numAssetsWeightHint, + }); - console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); + console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); - await helper.fastDemocracy.executeProposal('register UNQ foreign asset', encodedProposal); + await helper.fastDemocracy.executeProposal('register UNQ foreign asset', encodedProposal); - // >>> Acquire Unique AssetId Info on Moonbeam >>> - console.log('Acquire Unique AssetId Info on Moonbeam.......'); + // >>> Acquire Unique AssetId Info on Moonbeam >>> + console.log('Acquire Unique AssetId Info 
on Moonbeam.......'); - assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); + assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); - console.log('UNQ asset ID is %s', assetId); - console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); + console.log('UNQ asset ID is %s', assetId); + console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); + } // >>> Acquire Unique AssetId Info on Moonbeam >>> // >>> Sponsoring random Account >>> @@ -1456,15 +1449,6 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { let targetAccountBalance = 0n; const [targetAccount] = await helper.arrange.createAccounts([targetAccountBalance], alice); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }; - const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( targetAccount.addressRaw, { @@ -1527,17 +1511,6 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { const testAmount = 10_000n * (10n ** UNQ_DECIMALS); const [targetAccount] = await helper.arrange.createAccounts([0n], alice); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }; - const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, { @@ -1634,40 +1607,41 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { }); await usingAstarPlaygrounds(astarUrl, async (helper) => { - console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production - await helper.assets.create( - alice, - UNQ_ASSET_ID_ON_ASTAR, - alice.address, - UNQ_MINIMAL_BALANCE_ON_ASTAR, - ); - - await helper.assets.setMetadata( - alice, - UNQ_ASSET_ID_ON_ASTAR, - 'Cross chain UNQ', - 'xcUNQ', - Number(UNQ_DECIMALS), - ); - - console.log('2. Register asset location on Astar'); - const assetLocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, + if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { + console.log('1. Create foreign asset and metadata'); + // TODO update metadata with values from production + await helper.assets.create( + alice, + UNQ_ASSET_ID_ON_ASTAR, + alice.address, + UNQ_MINIMAL_BALANCE_ON_ASTAR, + ); + + await helper.assets.setMetadata( + alice, + UNQ_ASSET_ID_ON_ASTAR, + 'Cross chain UNQ', + 'xcUNQ', + Number(UNQ_DECIMALS), + ); + + console.log('2. Register asset location on Astar'); + const assetLocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, }, }, - }, - }; - - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, UNQ_ASSET_ID_ON_ASTAR]); + }; - console.log('3. Set UNQ payment for XCM execution on Astar'); - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, UNQ_ASSET_ID_ON_ASTAR]); + console.log('3. Set UNQ payment for XCM execution on Astar'); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } console.log('4. 
Transfer 1 ASTR to recipient to create the account (needed due to existential balance)'); await helper.balance.transferToSubstrate(alice, randomAccount.address, astarInitialBalance); }); @@ -1825,15 +1799,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { let targetAccountBalance = 0n; const [targetAccount] = await helper.arrange.createAccounts([targetAccountBalance], alice); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }; - const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( targetAccount.addressRaw, { @@ -1888,17 +1853,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { const testAmount = 10_000n * (10n ** UNQ_DECIMALS); const [targetAccount] = await helper.arrange.createAccounts([0n], alice); - const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }; - const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, { From d91b2e0730df68273cb1e95d964f05de3502f6a0 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 25 Sep 2023 06:55:54 +0000 Subject: [PATCH 049/143] fix(xcm unique test): typo --- tests/src/xcm/xcmUnique.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 9ee60022d5..ee434f108e 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -1293,7 +1293,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { const unitsPerSecond = 1n; const numAssetsWeightHint = 0; - if((await helper.assetManager.assetTypeId(uniqueAssetLocation)).toJSON) { + if((await helper.assetManager.assetTypeId(uniqueAssetLocation)).toJSON()) { console.log('Unique asset is already registered on MoonBeam'); } else { const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ From fa6d62bd864f8365945462f9879e88517436e8b1 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 25 Sep 2023 09:43:55 +0000 Subject: [PATCH 050/143] fix(unq xcm tests): restore state for the Acala --- tests/src/xcm/xcmUnique.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index ee434f108e..62d19f28b7 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -530,6 +530,8 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { console.log('UNQ token already registered on Acala assetRegistry pallet'); } await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); + balanceAcalaTokenInit = await helper.balance.getSubstrate(randomAccount.address); + balanceUniqueForeignTokenInit = await helper.tokens.accounts(randomAccount.address, {ForeignAsset: 0}); }); await usingPlaygrounds(async (helper) => { @@ -1362,7 +1364,6 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { await helper.wait.newBlocks(3); - balanceGlmrTokenMiddle = await helper.balance.getEthereum(randomAccountMoonbeam.address); const glmrFees = balanceGlmrTokenInit - balanceGlmrTokenMiddle; From 1c40ef30a9680bad61490581b9faed1aad89fd8b Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 25 Sep 2023 13:12:08 +0000 Subject: [PATCH 051/143] ci(xcm) : added llcxm test into wokrflow --- 
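The `testFullXcmUnique` script added below globs `./**/xcm/*xcmUnique.test.ts`, a pattern
that silently skips `lowLevelXcmUnique.test.ts` because that file name has a capital `X`
after `lowLevel`; this appears to be why the follow-up "fix package" patch widens the
pattern to `*Unique.test.ts`. A small illustrative check (hypothetical snippet, treating
these suffix-only globs as case-sensitive `endsWith` matches):

    const specs = ['lowLevelXcmUnique.test.ts', 'xcmUnique.test.ts'];
    for(const file of specs) {
      console.log(
        file,
        '| *xcmUnique.test.ts:', file.endsWith('xcmUnique.test.ts'),
        '| *Unique.test.ts:', file.endsWith('Unique.test.ts'),
      );
    }
    // lowLevelXcmUnique.test.ts | *xcmUnique.test.ts: false | *Unique.test.ts: true
    // xcmUnique.test.ts | *xcmUnique.test.ts: true | *Unique.test.ts: true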
.github/workflows/xcm.yml | 12 ++++-------- tests/package.json | 1 + 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/xcm.yml b/.github/workflows/xcm.yml index 1dde0e2b37..a70bdbd155 100644 --- a/.github/workflows/xcm.yml +++ b/.github/workflows/xcm.yml @@ -13,9 +13,7 @@ env: # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - prepare-execution-marix: - name: Prepare execution matrix runs-on: [self-hosted-ci] @@ -23,14 +21,13 @@ jobs: matrix: ${{ steps.create_matrix.outputs.matrix }} steps: - - name: Clean Workspace uses: AutoModality/action-clean@v1.1.0 # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v3.1.0 with: - ref: ${{ github.head_ref }} #Checking out head commit + ref: ${{ github.head_ref }} #Checking out head commit - name: Read .env file uses: xom9ikk/dotenv@v2 @@ -42,10 +39,9 @@ jobs: matrix: | network {opal}, relay_branch {${{ env.UNIQUEWEST_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.WESTMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmOpal}, runtime_features {opal-runtime} network {quartz}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, acala_version {${{ env.KARURA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONRIVER_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINE_BUILD_BRANCH }}}, astar_version {${{ env.SHIDEN_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmQuartz}, runtime_features {quartz-runtime} - network {unique}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmUnique}, runtime_features {unique-runtime} + network {unique}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testFullXcmUnique}, runtime_features {unique-runtime} xcm: - needs: prepare-execution-marix # The type of runner that the job will run on runs-on: [XL] @@ -54,7 +50,7 @@ jobs: name: ${{ matrix.network }} - continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. + continue-on-error: true #Do not stop testing of matrix runs failed. As it decided during PR review - it required 50/50& Let's check it with false. 
strategy: matrix: @@ -71,7 +67,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v3.1.0 with: - ref: ${{ github.head_ref }} #Checking out head commit + ref: ${{ github.head_ref }} #Checking out head commit # Prepare SHA - name: Prepare SHA diff --git a/tests/package.json b/tests/package.json index bec5eddffc..453c7d6807 100644 --- a/tests/package.json +++ b/tests/package.json @@ -114,6 +114,7 @@ "testIdentity": "RUN_COLLATOR_TESTS=1 yarn _test ./**/identity.*test.ts --timeout 49999999", "testLowLevelXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/lowLevelXcmUnique.test.ts", "testXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmUnique.test.ts", + "testFullXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/*xcmUnique.test.ts", "testXcmQuartz": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmQuartz.test.ts", "testXcmOpal": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmOpal.test.ts", "testXcmTransferAcala": "yarn _test ./**/xcm/xcmTransferAcala.test.ts acalaId=2000 uniqueId=5000", From 987aad3eb3991168aca45300c02b4a5c3cc52e0d Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 25 Sep 2023 18:11:48 +0000 Subject: [PATCH 052/143] fix(test xcm) : fix package --- tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/package.json b/tests/package.json index 453c7d6807..48371c4636 100644 --- a/tests/package.json +++ b/tests/package.json @@ -114,7 +114,7 @@ "testIdentity": "RUN_COLLATOR_TESTS=1 yarn _test ./**/identity.*test.ts --timeout 49999999", "testLowLevelXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/lowLevelXcmUnique.test.ts", "testXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmUnique.test.ts", - "testFullXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/*xcmUnique.test.ts", + "testFullXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/*Unique.test.ts", "testXcmQuartz": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmQuartz.test.ts", "testXcmOpal": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmOpal.test.ts", "testXcmTransferAcala": "yarn _test ./**/xcm/xcmTransferAcala.test.ts acalaId=2000 uniqueId=5000", From 10b818031b10092f20860b300b803f9d3cbd1e46 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 26 Sep 2023 06:01:46 +0000 Subject: [PATCH 053/143] fix(xcmUnique): hook for Moonbeam --- tests/src/xcm/lowLevelXcmUnique.test.ts | 13 ++++++------- tests/src/xcm/xcmUnique.test.ts | 13 ++++++------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 4446f5f986..a95fc10ad4 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -653,16 +653,15 @@ describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => { console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); await helper.fastDemocracy.executeProposal('register UNQ foreign asset', encodedProposal); + } - // >>> Acquire Unique AssetId Info on Moonbeam >>> - console.log('Acquire Unique AssetId Info on Moonbeam.......'); + // >>> Acquire Unique AssetId Info on Moonbeam >>> + console.log('Acquire Unique AssetId Info on Moonbeam.......'); - assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); + assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); - console.log('UNQ asset ID is %s', assetId); - console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); - } - // >>> Acquire Unique AssetId Info on 
Moonbeam >>> + console.log('UNQ asset ID is %s', assetId); + console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); // >>> Sponsoring random Account >>> console.log('Sponsoring random Account.......'); diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 62d19f28b7..3e689fe9e6 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -1310,15 +1310,14 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); await helper.fastDemocracy.executeProposal('register UNQ foreign asset', encodedProposal); + } - // >>> Acquire Unique AssetId Info on Moonbeam >>> - console.log('Acquire Unique AssetId Info on Moonbeam.......'); - - assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); + // >>> Acquire Unique AssetId Info on Moonbeam >>> + console.log('Acquire Unique AssetId Info on Moonbeam.......'); - console.log('UNQ asset ID is %s', assetId); - console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); - } + assetId = (await helper.assetManager.assetTypeId(uniqueAssetLocation)).toString(); + console.log('UNQ asset ID is %s', assetId); + console.log('Acquire Unique AssetId Info on Moonbeam.......DONE'); // >>> Acquire Unique AssetId Info on Moonbeam >>> // >>> Sponsoring random Account >>> From eae22fa8eb0bca097390f6ff28c87bb334959081 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 26 Sep 2023 12:40:18 +0000 Subject: [PATCH 054/143] refactor: xcm Unq tests --- tests/src/xcm/lowLevelXcmUnique.test.ts | 131 +++++------------------- tests/src/xcm/xcm.types.ts | 85 +++++++++++++++ tests/src/xcm/xcmUnique.test.ts | 72 ++++--------- 3 files changed, 127 insertions(+), 161 deletions(-) create mode 100644 tests/src/xcm/xcm.types.ts diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index a95fc10ad4..7220804a99 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -17,25 +17,11 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; -import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import {Event} from '../util/playgrounds/unique.dev'; import {nToBigInt} from '@polkadot/util'; import {hexToString} from '@polkadot/util'; +import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; -const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); -const ACALA_CHAIN = +(process.env.RELAY_ACALA_ID || 2000); -const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); -const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); -const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); - - - -const acalaUrl = config.acalaUrl; -const moonbeamUrl = config.moonbeamUrl; -const astarUrl = config.astarUrl; -const polkadexUrl = config.polkadexUrl; - -const ASTAR_DECIMALS = 18n; -const UNQ_DECIMALS = 18n; const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; @@ -43,71 +29,11 
@@ const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; -const SAFE_XCM_VERSION = 2; -const maxWaitBlocks = 6; - -const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, -}; - let balanceUniqueTokenInit: bigint; let balanceUniqueTokenMiddle: bigint; let balanceUniqueTokenFinal: bigint; let unqFees: bigint; -const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash - && event.outcome.isFailedToTransactAsset); -}; -const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper, messageSent: any) => { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash - && event.outcome.isUntrustedReserveLocation); -}; - -const NETWORKS = { - acala: usingAcalaPlaygrounds, - astar: usingAstarPlaygrounds, - polkadex: usingPolkadexPlaygrounds, - moonbeam: usingMoonbeamPlaygrounds, -} as const; - -function mapToChainId(networkName: keyof typeof NETWORKS) { - switch (networkName) { - case 'acala': - return ACALA_CHAIN; - case 'astar': - return ASTAR_CHAIN; - case 'moonbeam': - return MOONBEAM_CHAIN; - case 'polkadex': - return POLKADEX_CHAIN; - } -} - -function mapToChainUrl(networkName: keyof typeof NETWORKS): string { - switch (networkName) { - case 'acala': - return acalaUrl; - case 'astar': - return astarUrl; - case 'moonbeam': - return moonbeamUrl; - case 'polkadex': - return polkadexUrl; - } -} - -function getDevPlayground(name: T) { - return NETWORKS[name]; -} - async function genericSendUnqTo( networkName: keyof typeof NETWORKS, @@ -227,13 +153,13 @@ async function genericSendUnqBack( await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, xcmProgram); + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, xcmProgram); xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, xcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, xcmProgram]); // Needed to bypass the call filter. 
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal('sending MoonBeam -> Unique via XCM program', batchCall); + await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } }); @@ -279,13 +205,13 @@ async function genericSendOnlyOwnedBalance( await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgram); + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgram); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgram]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal('sending MoonBeam -> Unique via XCM program', batchCall); + await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } }); @@ -338,16 +264,15 @@ async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, // Try to trick Unique using full UNQ identification await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgramFullId); + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramFullId); maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } // Moonbeam case else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramFullId]); - + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); // Needed to bypass the call filter. 
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal('try to act like a reserve location for UNQ using path asset identification', batchCall); + await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using path asset identification`,batchCall); maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } @@ -362,15 +287,14 @@ async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, // Try to trick Unique using shortened UNQ identification await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueMultilocation, maliciousXcmProgramHereId); + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramHereId); maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramHereId]); - + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramHereId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal('try to act like a reserve location for UNQ using "here" asset identification', batchCall); + await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } @@ -405,19 +329,17 @@ async function genericRejectNativeToknsFrom(netwokrName: keyof typeof NETWORKS, ); await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueMultilocation, maliciousXcmProgramFullId); + await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueVersionedMultilocation, maliciousXcmProgramFullId); messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramFullId]); - + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); // Needed to bypass the call filter. 
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal('sending native tokens to the Unique via fast democracy', batchCall); + await helper.fastDemocracy.executeProposal(`${netwokrName} sending native tokens to the Unique via fast democracy`, batchCall); messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } }); - await expectFailedToTransact(helper, messageSent); }); } @@ -493,17 +415,6 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Polkadex', () => { let alice: IKeyringPair; let randomAccount: IKeyringPair; - const uniqueAssetId = { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }; - before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); @@ -523,7 +434,9 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Polkadex', () => { if it is added again. Needed for debugging when this test is run multiple times. */ - if(!isWhitelisted) { + if(isWhitelisted) { + console.log('UNQ token is already whitelisted on Polkadex'); + } else { await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId); } @@ -639,7 +552,7 @@ describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => { const numAssetsWeightHint = 0; if((await helper.assetManager.assetTypeId(uniqueAssetLocation)).toJSON()) { - console.log('Unique asset already registered'); + console.log('Unique asset already registered on Moonbeam'); } else { const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ location: uniqueAssetLocation, @@ -750,6 +663,8 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { console.log('3. Set UNQ payment for XCM execution on Astar'); await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } else { + console.log('UNQ is already registered on Astar'); } console.log('4. 
Transfer 1 ASTR to recipient to create the account (needed due to existential balance)'); await helper.balance.transferToSubstrate(alice, randomAccount.address, astarInitialBalance); diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts new file mode 100644 index 0000000000..6df7f247c9 --- /dev/null +++ b/tests/src/xcm/xcm.types.ts @@ -0,0 +1,85 @@ +import {usingAcalaPlaygrounds, usingAstarPlaygrounds, usingMoonbeamPlaygrounds, usingPolkadexPlaygrounds} from '../util'; +import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import config from '../config'; + +export const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); +export const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); +export const ACALA_CHAIN = +(process.env.RELAY_ACALA_ID || 2000); +export const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); +export const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); +export const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); + +export const acalaUrl = config.acalaUrl; +export const moonbeamUrl = config.moonbeamUrl; +export const astarUrl = config.astarUrl; +export const polkadexUrl = config.polkadexUrl; + +export const SAFE_XCM_VERSION = 3; + +export const maxWaitBlocks = 6; + + +export const ASTAR_DECIMALS = 18n; +export const UNQ_DECIMALS = 18n; + +export const uniqueMultilocation = { + parents: 1, + interior: { + X1: { + Parachain: UNIQUE_CHAIN, + }, + }, +}; +export const uniqueVersionedMultilocation = { + V3: uniqueMultilocation, +}; + +export const uniqueAssetId = { + Concrete: uniqueMultilocation, +}; + +export const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isFailedToTransactAsset); +}; +export const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper, messageSent: any) => { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash + && event.outcome.isUntrustedReserveLocation); +}; + +export const NETWORKS = { + acala: usingAcalaPlaygrounds, + astar: usingAstarPlaygrounds, + polkadex: usingPolkadexPlaygrounds, + moonbeam: usingMoonbeamPlaygrounds, +} as const; + +export function mapToChainId(networkName: keyof typeof NETWORKS) { + switch (networkName) { + case 'acala': + return ACALA_CHAIN; + case 'astar': + return ASTAR_CHAIN; + case 'moonbeam': + return MOONBEAM_CHAIN; + case 'polkadex': + return POLKADEX_CHAIN; + } +} + +export function mapToChainUrl(networkName: keyof typeof NETWORKS): string { + switch (networkName) { + case 'acala': + return acalaUrl; + case 'astar': + return astarUrl; + case 'moonbeam': + return moonbeamUrl; + case 'polkadex': + return polkadexUrl; + } +} + +export function getDevPlayground(name: T) { + return NETWORKS[name]; +} \ No newline at end of file diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 3e689fe9e6..454e95dbd0 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -17,15 +17,10 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingRelayPlaygrounds, usingMoonbeamPlaygrounds, usingStatemintPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; -import {DevUniqueHelper, Event} from 
'../util/playgrounds/unique.dev'; +import {Event} from '../util/playgrounds/unique.dev'; import {hexToString, nToBigInt} from '@polkadot/util'; +import {ACALA_CHAIN, ASTAR_CHAIN, MOONBEAM_CHAIN, POLKADEX_CHAIN, SAFE_XCM_VERSION, STATEMINT_CHAIN, UNIQUE_CHAIN, expectFailedToTransact, expectUntrustedReserveLocationFail, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; -const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); -const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); -const ACALA_CHAIN = +(process.env.RELAY_ACALA_ID || 2000); -const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); -const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); -const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); const STATEMINT_PALLET_INSTANCE = 50; @@ -55,28 +50,6 @@ const USDT_ASSET_METADATA_DESCRIPTION = 'USDT'; const USDT_ASSET_METADATA_MINIMAL_BALANCE = 1n; const USDT_ASSET_AMOUNT = 10_000_000_000_000_000_000_000_000n; -const SAFE_XCM_VERSION = 2; -const maxWaitBlocks = 6; - -const uniqueMultilocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, -}; - -const expectFailedToTransact = async (helper: DevUniqueHelper, messageSent: any) => { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash - && event.outcome.isFailedToTransactAsset); -}; -const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper, messageSent: any) => { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash - && event.outcome.isUntrustedReserveLocation); -}; describeXCM('[XCM] Integration test: Exchanging USDT with Statemint', () => { let alice: IKeyringPair; let bob: IKeyringPair; @@ -704,7 +677,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { // Try to trick Unique await usingAcalaPlaygrounds(acalaUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, maliciousXcmProgram); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); @@ -729,7 +702,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { ); await usingAcalaPlaygrounds(acalaUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, validXcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, validXcmProgram); }); await helper.wait.newBlocks(maxWaitBlocks); @@ -820,17 +793,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { let balanceUniqueTokenFinal: bigint; const maxWaitBlocks = 6; - const uniqueAssetId = { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }; - before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); @@ -850,7 +812,9 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { if it is added again. Needed for debugging when this test is run multiple times. 
*/ - if(!isWhitelisted) { + if(isWhitelisted) { + console.log('UNQ token is already whitelisted on Polkadex'); + } else { await helper.getSudo().xcmHelper.whitelistToken(alice, uniqueAssetId); } @@ -951,7 +915,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, xcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, xcmProgram); xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); @@ -986,7 +950,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { await usingPolkadexPlaygrounds(polkadexUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, maliciousXcmProgram); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); @@ -1465,7 +1429,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { // Try to trick Unique await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgram]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); @@ -1494,7 +1458,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { ); await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, validXcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, validXcmProgram]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); @@ -1543,7 +1507,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { // Try to trick Unique using full UNQ identification await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramFullId]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); @@ -1560,7 +1524,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { // Try to trick Unique using shortened UNQ identification await usingMoonbeamPlaygrounds(moonbeamUrl, async (helper) => { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueMultilocation, maliciousXcmProgramHereId]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramHereId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); @@ -1641,6 +1605,8 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { console.log('3. 
Set UNQ payment for XCM execution on Astar'); await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } else { + console.log('UNQ is already registered on Astar'); } console.log('4. Transfer 1 ASTR to recipient to create the account (needed due to existential balance)'); await helper.balance.transferToSubstrate(alice, randomAccount.address, astarInitialBalance); @@ -1815,7 +1781,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { // Try to trick Unique await usingAstarPlaygrounds(astarUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, maliciousXcmProgram); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); @@ -1840,7 +1806,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { ); await usingAstarPlaygrounds(astarUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, validXcmProgram); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, validXcmProgram); }); await helper.wait.newBlocks(maxWaitBlocks); @@ -1885,7 +1851,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { // Try to trick Unique using full UNQ identification await usingAstarPlaygrounds(astarUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgramFullId); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, maliciousXcmProgramFullId); maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); @@ -1898,7 +1864,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { // Try to trick Unique using shortened UNQ identification await usingAstarPlaygrounds(astarUrl, async (helper) => { - await helper.getSudo().xcm.send(alice, uniqueMultilocation, maliciousXcmProgramHereId); + await helper.getSudo().xcm.send(alice, uniqueVersionedMultilocation, maliciousXcmProgramHereId); maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); }); From 9f2f03a30137ab5739d1cd670cd2ffebbfd3347d Mon Sep 17 00:00:00 2001 From: PraetorP Date: Wed, 27 Sep 2023 08:58:40 +0000 Subject: [PATCH 055/143] test(xcmUnique): switch to `uniqueAssetId` , typo --- tests/src/xcm/lowLevelXcmUnique.test.ts | 30 ++++---------- tests/src/xcm/xcmUnique.test.ts | 53 +++---------------------- 2 files changed, 12 insertions(+), 71 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 7220804a99..56a3efe975 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -137,14 +137,7 @@ async function genericSendUnqBack( const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( randomAccountOnUnq.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }, + uniqueAssetId, SENDBACK_AMOUNT, ); @@ -233,16 +226,7 @@ async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }, + uniqueAssetId, testAmount, ); @@ -307,9 
+291,9 @@ async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, }); } -async function genericRejectNativeToknsFrom(netwokrName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { - const networkUrl = mapToChainUrl(netwokrName); - const targetPlayground = getDevPlayground(netwokrName); +async function genericRejectNativeToknsFrom(networkName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); let messageSent: any; await usingPlaygrounds(async (helper) => { @@ -320,7 +304,7 @@ async function genericRejectNativeToknsFrom(netwokrName: keyof typeof NETWORKS, parents: 1, interior: { X1: { - Parachain: mapToChainId(netwokrName), + Parachain: mapToChainId(networkName), }, }, }, @@ -335,7 +319,7 @@ async function genericRejectNativeToknsFrom(netwokrName: keyof typeof NETWORKS, const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${netwokrName} sending native tokens to the Unique via fast democracy`, batchCall); + await helper.fastDemocracy.executeProposal(`${networkName} sending native tokens to the Unique via fast democracy`, batchCall); messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 454e95dbd0..531d4f519f 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -728,16 +728,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Acala', () => { const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }, + uniqueAssetId, testAmount, ); @@ -900,14 +891,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( randomAccount.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: {Parachain: UNIQUE_CHAIN}, - }, - }, - }, + uniqueAssetId, TRANSFER_AMOUNT, ); @@ -978,16 +962,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Polkadex', () => { const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }, + uniqueAssetId, testAmount, ); @@ -1477,16 +1452,7 @@ describeXCM('[XCM] Integration test: Exchanging UNQ with Moonbeam', () => { const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, - { - Concrete: { - parents: 0, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }, + uniqueAssetId, testAmount, ); @@ -1821,16 +1787,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( targetAccount.addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: UNIQUE_CHAIN, - }, - }, - }, - }, + uniqueAssetId, testAmount, ); From dc6f0e24d331bcba9e5d0277a67575d6d5233d1c Mon Sep 17 00:00:00 2001 From: PraetorP Date: Wed, 27 Sep 2023 09:15:24 +0000 Subject: [PATCH 
056/143] fix(test xcm Unq): typos --- tests/src/xcm/lowLevelXcmUnique.test.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 56a3efe975..7f11634deb 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -291,7 +291,7 @@ async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, }); } -async function genericRejectNativeToknsFrom(networkName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { +async function genericRejectNativeTokensFrom(networkName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { const networkUrl = mapToChainUrl(networkName); const targetPlayground = getDevPlayground(networkName); let messageSent: any; @@ -466,19 +466,19 @@ describeXCM('[XCMLL] Integration test: Unique rejects non-native tokens', () => }); itSub('Unique rejects ACA tokens from Acala', async () => { - await genericRejectNativeToknsFrom('acala', alice); + await genericRejectNativeTokensFrom('acala', alice); }); itSub('Unique rejects GLMR tokens from Moonbeam', async () => { - await genericRejectNativeToknsFrom('moonbeam', alice); + await genericRejectNativeTokensFrom('moonbeam', alice); }); itSub('Unique rejects ASTR tokens from Astar', async () => { - await genericRejectNativeToknsFrom('astar', alice); + await genericRejectNativeTokensFrom('astar', alice); }); itSub('Unique rejects PDX tokens from Polkadex', async () => { - await genericRejectNativeToknsFrom('polkadex', alice); + await genericRejectNativeTokensFrom('polkadex', alice); }); }); From 61fc26e2c8d23cd0edd6c434d4538fbc95030154 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Thu, 28 Sep 2023 08:48:17 +0000 Subject: [PATCH 057/143] test(xcm qtz): added `XcmTestHelper` --- tests/package.json | 2 + tests/src/xcm/lowLevelXcmQuartz.test.ts | 381 +++++++++++++++++++++++ tests/src/xcm/lowLevelXcmUnique.test.ts | 19 +- tests/src/xcm/xcm.types.ts | 398 +++++++++++++++++++++++- tests/src/xcm/xcmQuartz.test.ts | 20 +- 5 files changed, 788 insertions(+), 32 deletions(-) create mode 100644 tests/src/xcm/lowLevelXcmQuartz.test.ts diff --git a/tests/package.json b/tests/package.json index 48371c4636..3138fbe6a5 100644 --- a/tests/package.json +++ b/tests/package.json @@ -116,6 +116,8 @@ "testXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmUnique.test.ts", "testFullXcmUnique": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/*Unique.test.ts", "testXcmQuartz": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmQuartz.test.ts", + "testLowLevelXcmQuartz": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/lowLevelXcmQuartz.test.ts", + "testFullXcmQuartz": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/*Quartz.test.ts", "testXcmOpal": "RUN_XCM_TESTS=1 yarn _test ./**/xcm/xcmOpal.test.ts", "testXcmTransferAcala": "yarn _test ./**/xcm/xcmTransferAcala.test.ts acalaId=2000 uniqueId=5000", "testXcmTransferStatemine": "yarn _test ./**/xcm/xcmTransferStatemine.test.ts statemineId=1000 uniqueId=5000", diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts new file mode 100644 index 0000000000..41a1258062 --- /dev/null +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -0,0 +1,381 @@ +// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. +// This file is part of Unique Network. 
+ +// Unique Network is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Unique Network is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Unique Network. If not, see . + +import {IKeyringPair} from '@polkadot/types/types'; +import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingMoonriverPlaygrounds, usingShidenPlaygrounds} from '../util'; +import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import {STATEMINE_CHAIN, QUARTZ_CHAIN, KARURA_CHAIN, MOONRIVER_CHAIN, SHIDEN_CHAIN, STATEMINE_DECIMALS, KARURA_DECIMALS, QTZ_DECIMALS, RELAY_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, relayUrl, shidenUrl, statemineUrl, SAFE_XCM_VERSION, XcmTestHelper, TRANSFER_AMOUNT} from './xcm.types'; + + +const testHelper = new XcmTestHelper('quartz'); + +describeXCM('[XCMLL] Integration test: Exchanging tokens with Karura', () => { + let alice: IKeyringPair; + let randomAccount: IKeyringPair; + + let balanceQuartzTokenInit: bigint; + let balanceQuartzTokenMiddle: bigint; + let balanceQuartzTokenFinal: bigint; + let balanceKaruraTokenInit: bigint; + let balanceKaruraTokenMiddle: bigint; + let balanceKaruraTokenFinal: bigint; + let balanceQuartzForeignTokenInit: bigint; + let balanceQuartzForeignTokenMiddle: bigint; + let balanceQuartzForeignTokenFinal: bigint; + + // computed by a test transfer from prod Quartz to prod Karura. + // 2 QTZ sent https://quartz.subscan.io/xcm_message/kusama-f60d821b049f8835a3005ce7102285006f5b61e9 + // 1.919176000000000000 QTZ received (you can check Karura's chain state in the corresponding block) + const expectedKaruraIncomeFee = 2000000000000000000n - 1919176000000000000n; + const karuraEps = 8n * 10n ** 16n; + + let karuraBackwardTransferAmount: bigint; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + [randomAccount] = await helper.arrange.createAccounts([0n], alice); + + // Set the default version to wrap the first message to other chains. 
+ await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingKaruraPlaygrounds(karuraUrl, async (helper) => { + const destination = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: QUARTZ_CHAIN, + }, + }, + }, + }; + + const metadata = { + name: 'Quartz', + symbol: 'QTZ', + decimals: 18, + minimalBalance: 1000000000000000000n, + }; + + await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); + balanceKaruraTokenInit = await helper.balance.getSubstrate(randomAccount.address); + balanceQuartzForeignTokenInit = await helper.tokens.accounts(randomAccount.address, {ForeignAsset: 0}); + }); + + await usingPlaygrounds(async (helper) => { + await helper.balance.transferToSubstrate(alice, randomAccount.address, 10n * TRANSFER_AMOUNT); + balanceQuartzTokenInit = await helper.balance.getSubstrate(randomAccount.address); + }); + }); + + itSub('Should connect and send QTZ to Karura', async () => { + await testHelper.sendUnqTo('karura', randomAccount); + }); + + itSub('Should connect to Karura and send QTZ back', async () => { + await testHelper.sendUnqBack('karura', alice, randomAccount); + }); + + itSub('Karura can send only up to its balance', async () => { + await testHelper.sendOnlyOwnedBalance('karura', alice); + }); +}); +// These tests are relevant only when +// the corresponding foreign assets are not registered +describeXCM('[XCMLL] Integration test: Quartz rejects non-native tokens', () => { + let alice: IKeyringPair; + let alith: IKeyringPair; + + const testAmount = 100_000_000_000n; + let quartzParachainJunction; + let quartzAccountJunction; + + let quartzParachainMultilocation: any; + let quartzAccountMultilocation: any; + let quartzCombinedMultilocation: any; + + let messageSent: any; + + const maxWaitBlocks = 3; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + + quartzParachainJunction = {Parachain: QUARTZ_CHAIN}; + quartzAccountJunction = { + AccountId32: { + network: 'Any', + id: alice.addressRaw, + }, + }; + + quartzParachainMultilocation = { + V2: { + parents: 1, + interior: { + X1: quartzParachainJunction, + }, + }, + }; + + quartzAccountMultilocation = { + V2: { + parents: 0, + interior: { + X1: quartzAccountJunction, + }, + }, + }; + + quartzCombinedMultilocation = { + V2: { + parents: 1, + interior: { + X2: [quartzParachainJunction, quartzAccountJunction], + }, + }, + }; + + // Set the default version to wrap the first message to other chains.
+ await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + // eslint-disable-next-line require-await + await usingMoonriverPlaygrounds(moonriverUrl, async (helper) => { + alith = helper.account.alithAccount(); + }); + }); + + itSub('Quartz rejects KAR tokens from Karura', async () => { + await testHelper.rejectNativeTokensFrom('karura', alice); + }); + + itSub('Quartz rejects MOVR tokens from Moonriver', async () => { + await testHelper.rejectNativeTokensFrom('moonriver', alice); + }); + + itSub('Quartz rejects SDN tokens from Shiden', async () => { + await testHelper.rejectNativeTokensFrom('shiden', alice); + }); +}); + +describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { + // Quartz constants + let alice: IKeyringPair; + let quartzAssetLocation; + + let randomAccountQuartz: IKeyringPair; + let randomAccountMoonriver: IKeyringPair; + + // Moonriver constants + let assetId: string; + + const quartzAssetMetadata = { + name: 'xcQuartz', + symbol: 'xcQTZ', + decimals: 18, + isFrozen: false, + minimalBalance: 1n, + }; + + let balanceQuartzTokenInit: bigint; + let balanceQuartzTokenMiddle: bigint; + let balanceQuartzTokenFinal: bigint; + let balanceForeignQtzTokenInit: bigint; + let balanceForeignQtzTokenMiddle: bigint; + let balanceForeignQtzTokenFinal: bigint; + let balanceMovrTokenInit: bigint; + let balanceMovrTokenMiddle: bigint; + let balanceMovrTokenFinal: bigint; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + [randomAccountQuartz] = await helper.arrange.createAccounts([0n], alice); + + balanceForeignQtzTokenInit = 0n; + + // Set the default version to wrap the first message to other chains. + await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingMoonriverPlaygrounds(moonriverUrl, async (helper) => { + const alithAccount = helper.account.alithAccount(); + const baltatharAccount = helper.account.baltatharAccount(); + const dorothyAccount = helper.account.dorothyAccount(); + + randomAccountMoonriver = helper.account.create(); + + // >>> Sponsoring Dorothy >>> + console.log('Sponsoring Dorothy.......'); + await helper.balance.transferToEthereum(alithAccount, dorothyAccount.address, 11_000_000_000_000_000_000n); + console.log('Sponsoring Dorothy.......DONE'); + // <<< Sponsoring Dorothy <<< + + quartzAssetLocation = { + XCM: { + parents: 1, + interior: {X1: {Parachain: QUARTZ_CHAIN}}, + }, + }; + const existentialDeposit = 1n; + const isSufficient = true; + const unitsPerSecond = 1n; + const numAssetsWeightHint = 0; + + const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ + location: quartzAssetLocation, + metadata: quartzAssetMetadata, + existentialDeposit, + isSufficient, + unitsPerSecond, + numAssetsWeightHint, + }); + + console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); + + await helper.fastDemocracy.executeProposal('register QTZ foreign asset', encodedProposal); + + // >>> Acquire Quartz AssetId Info on Moonriver >>> + console.log('Acquire Quartz AssetId Info on Moonriver.......'); + + assetId = (await helper.assetManager.assetTypeId(quartzAssetLocation)).toString(); + + console.log('QTZ asset ID is %s', assetId); + console.log('Acquire Quartz AssetId Info on Moonriver.......DONE'); + // >>> Acquire Quartz AssetId Info on Moonriver >>> + + // >>> Sponsoring random Account >>> + console.log('Sponsoring random Account.......'); + await 
helper.balance.transferToEthereum(baltatharAccount, randomAccountMoonriver.address, 11_000_000_000_000_000_000n); + console.log('Sponsoring random Account.......DONE'); + // <<< Sponsoring random Account <<< + + balanceMovrTokenInit = await helper.balance.getEthereum(randomAccountMoonriver.address); + }); + + await usingPlaygrounds(async (helper) => { + await helper.balance.transferToSubstrate(alice, randomAccountQuartz.address, 10n * TRANSFER_AMOUNT); + balanceQuartzTokenInit = await helper.balance.getSubstrate(randomAccountQuartz.address); + }); + }); + + itSub('Should connect and send QTZ to Moonriver', async () => { + await testHelper.sendUnqTo('moonriver', randomAccountQuartz, randomAccountMoonriver); + }); + + itSub('Should connect to Moonriver and send QTZ back', async () => { + await testHelper.sendUnqBack('moonriver', alice, randomAccountQuartz); + }); + + itSub('Moonriver can send only up to its balance', async () => { + await testHelper.sendOnlyOwnedBalance('moonriver', alice); + }); + + itSub('Should not accept reserve transfer of QTZ from Moonriver', async () => { + await testHelper.reserveTransferUNQfrom('moonriver', alice); + }); +}); + +describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { + let alice: IKeyringPair; + let sender: IKeyringPair; + + const QTZ_ASSET_ID_ON_SHIDEN = 1; + const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; + + // Quartz -> Shiden + const shidenInitialBalance = 1n * (10n ** SHIDEN_DECIMALS); // 1 SDN, existential deposit required to actually create the account on Shiden + const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? + const qtzToShidenTransferred = 10n * (10n ** QTZ_DECIMALS); // 10 QTZ + const qtzToShidenArrived = 9_999_999_999_088_000_000n; // 9.999 ... QTZ, Shiden takes a commission in foreign tokens + + // Shiden -> Quartz + const qtzFromShidenTransfered = 5n * (10n ** QTZ_DECIMALS); // 5 QTZ + const qtzOnShidenLeft = qtzToShidenArrived - qtzFromShidenTransfered; // 4.999_999_999_088_000_000n QTZ + + let balanceAfterQuartzToShidenXCM: bigint; + + before(async () => { + await usingPlaygrounds(async (helper, privateKey) => { + alice = await privateKey('//Alice'); + [sender] = await helper.arrange.createAccounts([100n], alice); + console.log('sender', sender.address); + + // Set the default version to wrap the first message to other chains. + await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); + }); + + await usingShidenPlaygrounds(shidenUrl, async (helper) => { + console.log('1. Create foreign asset and metadata'); + // TODO update metadata with values from production + await helper.assets.create( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + alice.address, + QTZ_MINIMAL_BALANCE_ON_SHIDEN, + ); + + await helper.assets.setMetadata( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + 'Cross chain QTZ', + 'xcQTZ', + Number(QTZ_DECIMALS), + ); + + console.log('2. Register asset location on Shiden'); + const assetLocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: QUARTZ_CHAIN, + }, + }, + }, + }; + + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, QTZ_ASSET_ID_ON_SHIDEN]); + + console.log('3. Set QTZ payment for XCM execution on Shiden'); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + + console.log('4.
Transfer 1 SDN to recipient to create the account (needed due to existential balance)'); + await helper.balance.transferToSubstrate(alice, sender.address, shidenInitialBalance); + }); + }); + + itSub('Should connect and send QTZ to Shiden', async () => { + await testHelper.sendUnqTo('shiden', sender); + }); + + itSub('Should connect to Shiden and send QTZ back', async () => { + await testHelper.sendUnqBack('shiden', alice, sender); + }); + + itSub('Shiden can send only up to its balance', async () => { + await testHelper.sendOnlyOwnedBalance('shiden', alice); + }); + + itSub('Should not accept reserve transfer of QTZ from Shiden', async () => { + await testHelper.reserveTransferUNQfrom('shiden', alice); + }); +}); diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 7f11634deb..dadc44364d 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -20,7 +20,7 @@ import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usi import {Event} from '../util/playgrounds/unique.dev'; import {nToBigInt} from '@polkadot/util'; import {hexToString} from '@polkadot/util'; -import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; +import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, XcmTestHelper, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; @@ -34,6 +34,7 @@ let balanceUniqueTokenMiddle: bigint; let balanceUniqueTokenFinal: bigint; let unqFees: bigint; +const testHelper = new XcmTestHelper('unique'); async function genericSendUnqTo( networkName: keyof typeof NETWORKS, @@ -434,20 +435,20 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Polkadex', () => { }); itSub('Should connect and send UNQ to Polkadex', async () => { - await genericSendUnqTo('polkadex', randomAccount); + await testHelper.sendUnqTo('polkadex', randomAccount); }); itSub('Should connect to Polkadex and send UNQ back', async () => { - await genericSendUnqBack('polkadex', alice, randomAccount); + await testHelper.sendUnqBack('polkadex', alice, randomAccount); }); itSub('Polkadex can send only up to its balance', async () => { - await genericSendOnlyOwnedBalance('polkadex', alice); + await testHelper.sendOnlyOwnedBalance('polkadex', alice); }); itSub('Should not accept reserve transfer of UNQ from Polkadex', async () => { - await genericReserveTransferUNQfrom('polkadex', alice); + await testHelper.reserveTransferUNQfrom('polkadex', alice); }); }); @@ -574,19 +575,19 @@ describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => { }); itSub('Should connect and send UNQ to Moonbeam', async () => { - await genericSendUnqTo('moonbeam', randomAccountUnique, randomAccountMoonbeam); + await testHelper.sendUnqTo('moonbeam', randomAccountUnique, randomAccountMoonbeam); }); itSub('Should connect to Moonbeam and send UNQ back', async () => { - await genericSendUnqBack('moonbeam', alice, randomAccountUnique); + await testHelper.sendUnqBack('moonbeam', alice, randomAccountUnique); }); 
itSub('Moonbeam can send only up to its balance', async () => { - await genericSendOnlyOwnedBalance('moonbeam', alice); + await testHelper.sendOnlyOwnedBalance('moonbeam', alice); }); itSub('Should not accept reserve transfer of UNQ from Moonbeam', async () => { - await genericReserveTransferUNQfrom('moonbeam', alice); + await testHelper.reserveTransferUNQfrom('moonbeam', alice); }); }); diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index 6df7f247c9..cf6e6fd10c 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -1,4 +1,5 @@ -import {usingAcalaPlaygrounds, usingAstarPlaygrounds, usingMoonbeamPlaygrounds, usingPolkadexPlaygrounds} from '../util'; +import {IKeyringPair} from '@polkadot/types/types'; +import {expect, usingAcalaPlaygrounds, usingAstarPlaygrounds, usingKaruraPlaygrounds, usingMoonbeamPlaygrounds, usingMoonriverPlaygrounds, usingPlaygrounds, usingPolkadexPlaygrounds, usingShidenPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; import config from '../config'; @@ -9,19 +10,39 @@ export const MOONBEAM_CHAIN = +(process.env.RELAY_MOONBEAM_ID || 2004); export const ASTAR_CHAIN = +(process.env.RELAY_ASTAR_ID || 2006); export const POLKADEX_CHAIN = +(process.env.RELAY_POLKADEX_ID || 2040); +export const QUARTZ_CHAIN = +(process.env.RELAY_QUARTZ_ID || 2095); +export const STATEMINE_CHAIN = +(process.env.RELAY_STATEMINE_ID || 1000); +export const KARURA_CHAIN = +(process.env.RELAY_KARURA_ID || 2000); +export const MOONRIVER_CHAIN = +(process.env.RELAY_MOONRIVER_ID || 2023); +export const SHIDEN_CHAIN = +(process.env.RELAY_SHIDEN_ID || 2007); + +export const relayUrl = config.relayUrl; +export const statemintUrl = config.statemintUrl; +export const statemineUrl = config.statemineUrl; + export const acalaUrl = config.acalaUrl; export const moonbeamUrl = config.moonbeamUrl; export const astarUrl = config.astarUrl; export const polkadexUrl = config.polkadexUrl; +export const karuraUrl = config.karuraUrl; +export const moonriverUrl = config.moonriverUrl; +export const shidenUrl = config.shidenUrl; + export const SAFE_XCM_VERSION = 3; -export const maxWaitBlocks = 6; +export const RELAY_DECIMALS = 12; +export const STATEMINE_DECIMALS = 12; +export const KARURA_DECIMALS = 12; +export const SHIDEN_DECIMALS = 18n; +export const QTZ_DECIMALS = 18n; export const ASTAR_DECIMALS = 18n; export const UNQ_DECIMALS = 18n; +export const maxWaitBlocks = 6; + export const uniqueMultilocation = { parents: 1, interior: { @@ -52,9 +73,15 @@ export const NETWORKS = { astar: usingAstarPlaygrounds, polkadex: usingPolkadexPlaygrounds, moonbeam: usingMoonbeamPlaygrounds, + moonriver: usingMoonriverPlaygrounds, + karura: usingKaruraPlaygrounds, + shiden: usingShidenPlaygrounds, } as const; +type NetworkNames = keyof typeof NETWORKS; + +type NativeRuntime = 'opal' | 'quartz' | 'unique'; -export function mapToChainId(networkName: keyof typeof NETWORKS) { +export function mapToChainId(networkName: keyof typeof NETWORKS): number { switch (networkName) { case 'acala': return ACALA_CHAIN; @@ -64,10 +91,16 @@ export function mapToChainId(networkName: keyof typeof NETWORKS) { return MOONBEAM_CHAIN; case 'polkadex': return POLKADEX_CHAIN; + case 'moonriver': + return MOONRIVER_CHAIN; + case 'karura': + return KARURA_CHAIN; + case 'shiden': + return SHIDEN_CHAIN; } } -export function mapToChainUrl(networkName: keyof typeof NETWORKS): string { +export function mapToChainUrl(networkName: NetworkNames): string { switch (networkName) 
{ case 'acala': return acalaUrl; @@ -77,9 +110,364 @@ export function mapToChainUrl(networkName: keyof typeof NETWORKS): string { return moonbeamUrl; case 'polkadex': return polkadexUrl; + case 'moonriver': + return moonriverUrl; + case 'karura': + return karuraUrl; + case 'shiden': + return shidenUrl; } } -export function getDevPlayground(name: T) { +export function getDevPlayground(name: NetworkNames) { return NETWORKS[name]; +} + +export const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; +const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; +const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; +const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; +const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; + +export class XcmTestHelper { + private _balanceUniqueTokenInit: bigint = 0n; + private _balanceUniqueTokenMiddle: bigint = 0n; + private _balanceUniqueTokenFinal: bigint = 0n; + private _unqFees: bigint = 0n; + private _nativeRuntime: NativeRuntime; + + constructor(runtime: NativeRuntime) { + this._nativeRuntime = runtime; + } + + private _getNativeId() { + switch (this._nativeRuntime) { + case 'opal': + // To-Do + return 10; + case 'quartz': + return QUARTZ_CHAIN; + case 'unique': + return UNIQUE_CHAIN; + } + } + private _isAddress20FormatFor(network: NetworkNames) { + switch (network) { + case 'moonbeam': + case 'moonriver': + return true; + default: + return false; + } + } + + async sendUnqTo( + networkName: keyof typeof NETWORKS, + randomAccount: IKeyringPair, + randomAccountOnTargetChain = randomAccount, + ) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); + await usingPlaygrounds(async (helper) => { + this._balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); + const destination = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: mapToChainId(networkName), + }, + }, + }, + }; + + const beneficiary = { + V2: { + parents: 0, + interior: { + X1: ( + this._isAddress20FormatFor(networkName) ? + { + AccountKey20: { + network: 'Any', + key: randomAccountOnTargetChain.address, + }, + } + : + { + AccountId32: { + network: 'Any', + id: randomAccountOnTargetChain.addressRaw, + }, + } + ), + }, + }, + }; + + const assets = { + V2: [ + { + id: { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + fun: { + Fungible: TRANSFER_AMOUNT, + }, + }, + ], + }; + const feeAssetItem = 0; + + await helper.xcm.limitedReserveTransferAssets(randomAccount, destination, beneficiary, assets, feeAssetItem, 'Unlimited'); + const messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + this._balanceUniqueTokenMiddle = await helper.balance.getSubstrate(randomAccount.address); + + this._unqFees = this._balanceUniqueTokenInit - this._balanceUniqueTokenMiddle - TRANSFER_AMOUNT; + console.log('[Unique -> %s] transaction fees on Unique: %s UNQ', networkName, helper.util.bigIntToDecimals(this._unqFees)); + expect(this._unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; + + await targetPlayground(networkUrl, async (helper) => { + /* + Since only the parachain part of the Polkadex + infrastructure is launched (without their + solochain validators), processing incoming + assets will lead to an error. + This error indicates that the Polkadex chain + received a message from the Unique network, + since the hash is being checked to ensure + it matches what was sent. 
+ */ + if(networkName == 'polkadex') { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash); + } else { + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == messageSent.messageHash); + } + }); + + }); + } + + async sendUnqBack( + networkName: keyof typeof NETWORKS, + sudoer: IKeyringPair, + randomAccountOnUnq: IKeyringPair, + ) { + const networkUrl = mapToChainUrl(networkName); + + const targetPlayground = getDevPlayground(networkName); + await usingPlaygrounds(async (helper) => { + + const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + randomAccountOnUnq.addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: {Parachain: this._getNativeId()}, + }, + }, + }, + SENDBACK_AMOUNT, + ); + + let xcmProgramSent: any; + + + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, xcmProgram); + xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, xcmProgram]); + // Needed to bypass the call filter. + const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); + xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == xcmProgramSent.messageHash); + + this._balanceUniqueTokenFinal = await helper.balance.getSubstrate(randomAccountOnUnq.address); + + expect(this._balanceUniqueTokenFinal).to.be.equal(this._balanceUniqueTokenInit - this._unqFees - STAYED_ON_TARGET_CHAIN); + + }); + } + + async sendOnlyOwnedBalance( + networkName: keyof typeof NETWORKS, + sudoer: IKeyringPair, + ) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); + + const targetChainBalance = 10000n * (10n ** UNQ_DECIMALS); + + await usingPlaygrounds(async (helper) => { + const targetChainSovereignAccount = helper.address.paraSiblingSovereignAccount(mapToChainId(networkName)); + await helper.getSudo().balance.setBalanceSubstrate(sudoer, targetChainSovereignAccount, targetChainBalance); + const moreThanTargetChainHas = 2n * targetChainBalance; + + const targetAccount = helper.arrange.createEmptyAccount(); + + const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + moreThanTargetChainHas, + ); + + let maliciousXcmProgramSent: any; + + + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgram); + maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgram]); + // Needed to bypass the call filter. 
+ const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); + maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await expectFailedToTransact(helper, maliciousXcmProgramSent); + + const targetAccountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(targetAccountBalance).to.be.equal(0n); + }); + } + + async reserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { + const networkUrl = mapToChainUrl(netwokrName); + const targetPlayground = getDevPlayground(netwokrName); + + await usingPlaygrounds(async (helper) => { + const testAmount = 10_000n * (10n ** UNQ_DECIMALS); + const targetAccount = helper.arrange.createEmptyAccount(); + + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: this._getNativeId(), + }, + }, + }, + }, + testAmount, + ); + + const maliciousXcmProgramHereId = helper.arrange.makeXcmProgramReserveAssetDeposited( + targetAccount.addressRaw, + { + Concrete: { + parents: 0, + interior: 'Here', + }, + }, + testAmount, + ); + + let maliciousXcmProgramFullIdSent: any; + let maliciousXcmProgramHereIdSent: any; + const maxWaitBlocks = 3; + + // Try to trick Unique using full UNQ identification + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramFullId); + maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + // Moonbeam case + else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); + // Needed to bypass the call filter. + const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using path asset identification`,batchCall); + + maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramFullIdSent); + + let accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + + // Try to trick Unique using shortened UNQ identification + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramHereId); + maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramHereId]); + // Needed to bypass the call filter. 
+ const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); + + maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + + await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramHereIdSent); + + accountBalance = await helper.balance.getSubstrate(targetAccount.address); + expect(accountBalance).to.be.equal(0n); + }); + } + + async rejectNativeTokensFrom(networkName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); + let messageSent: any; + + await usingPlaygrounds(async (helper) => { + const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( + helper.arrange.createEmptyAccount().addressRaw, + { + Concrete: { + parents: 1, + interior: { + X1: { + Parachain: mapToChainId(networkName), + }, + }, + }, + }, + TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT, + ); + await targetPlayground(networkUrl, async (helper) => { + if('getSudo' in helper) { + await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueVersionedMultilocation, maliciousXcmProgramFullId); + messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } else if('fastDemocracy' in helper) { + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); + // Needed to bypass the call filter. + const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); + await helper.fastDemocracy.executeProposal(`${networkName} sending native tokens to the Unique via fast democracy`, batchCall); + + messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); + } + }); + await expectFailedToTransact(helper, messageSent); + }); + } + } \ No newline at end of file diff --git a/tests/src/xcm/xcmQuartz.test.ts b/tests/src/xcm/xcmQuartz.test.ts index 354dbfb6eb..fe7d1083b1 100644 --- a/tests/src/xcm/xcmQuartz.test.ts +++ b/tests/src/xcm/xcmQuartz.test.ts @@ -15,29 +15,13 @@ // along with Unique Network. If not, see . 
import {IKeyringPair} from '@polkadot/types/types'; -import config from '../config'; import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingRelayPlaygrounds, usingMoonriverPlaygrounds, usingStateminePlaygrounds, usingShidenPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; +import {STATEMINE_CHAIN, QUARTZ_CHAIN, KARURA_CHAIN, MOONRIVER_CHAIN, SHIDEN_CHAIN, STATEMINE_DECIMALS, KARURA_DECIMALS, QTZ_DECIMALS, RELAY_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, relayUrl, shidenUrl, statemineUrl} from './xcm.types'; -const QUARTZ_CHAIN = +(process.env.RELAY_QUARTZ_ID || 2095); -const STATEMINE_CHAIN = +(process.env.RELAY_STATEMINE_ID || 1000); -const KARURA_CHAIN = +(process.env.RELAY_KARURA_ID || 2000); -const MOONRIVER_CHAIN = +(process.env.RELAY_MOONRIVER_ID || 2023); -const SHIDEN_CHAIN = +(process.env.RELAY_SHIDEN_ID || 2007); -const STATEMINE_PALLET_INSTANCE = 50; -const relayUrl = config.relayUrl; -const statemineUrl = config.statemineUrl; -const karuraUrl = config.karuraUrl; -const moonriverUrl = config.moonriverUrl; -const shidenUrl = config.shidenUrl; - -const RELAY_DECIMALS = 12; -const STATEMINE_DECIMALS = 12; -const KARURA_DECIMALS = 12; -const SHIDEN_DECIMALS = 18n; -const QTZ_DECIMALS = 18n; +const STATEMINE_PALLET_INSTANCE = 50; const TRANSFER_AMOUNT = 2000000000000000000000000n; From 8d26c73748339e76350add2ace1dafc2fa616947 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 26 Sep 2023 15:03:13 +0200 Subject: [PATCH 058/143] feat: allow xcm transact for sys,gov and utility --- runtime/common/config/xcm/mod.rs | 52 +++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/runtime/common/config/xcm/mod.rs b/runtime/common/config/xcm/mod.rs index 1add5d3de6..a825d729ab 100644 --- a/runtime/common/config/xcm/mod.rs +++ b/runtime/common/config/xcm/mod.rs @@ -15,7 +15,7 @@ // along with Unique Network. If not, see . use frame_support::{ - traits::{Everything, Nothing, Get, ConstU32, ProcessMessageError}, + traits::{Everything, Nothing, Get, ConstU32, ProcessMessageError, Contains}, parameter_types, }; use frame_system::EnsureRoot; @@ -162,6 +162,52 @@ where pub type Weigher = FixedWeightBounds; +pub struct XcmCallFilter; +impl XcmCallFilter { + fn allow_gov_and_sys_call(call: &RuntimeCall) -> bool { + match call { + RuntimeCall::System(..) + | RuntimeCall::Identity(..) + | RuntimeCall::Preimage(..) + | RuntimeCall::Democracy(..) + | RuntimeCall::Council(..) + | RuntimeCall::TechnicalCommittee(..) + | RuntimeCall::CouncilMembership(..) + | RuntimeCall::TechnicalCommitteeMembership(..) + | RuntimeCall::FellowshipCollective(..) + | RuntimeCall::FellowshipReferenda(..) => true, + _ => false, + } + } + + fn allow_utility_call(call: &RuntimeCall) -> bool { + match call { + RuntimeCall::Utility(pallet_utility::Call::batch { calls, .. }) => { + calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + } + RuntimeCall::Utility(pallet_utility::Call::batch_all { calls, .. }) => { + calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + } + RuntimeCall::Utility(pallet_utility::Call::as_derivative { call, .. }) => { + Self::allow_gov_and_sys_call(call) + } + RuntimeCall::Utility(pallet_utility::Call::dispatch_as { call, .. }) => { + Self::allow_gov_and_sys_call(call) + } + RuntimeCall::Utility(pallet_utility::Call::force_batch { calls, .. 
}) => { + calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + } + _ => false, + } + } +} + +impl Contains for XcmCallFilter { + fn contains(call: &RuntimeCall) -> bool { + Self::allow_gov_and_sys_call(call) || Self::allow_utility_call(call) + } +} + pub struct XcmExecutorConfig(PhantomData); impl xcm_executor::Config for XcmExecutorConfig where @@ -191,9 +237,7 @@ where type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; - - // Deny all XCM Transacts. - type SafeCallFilter = Nothing; + type SafeCallFilter = XcmCallFilter; } #[cfg(feature = "runtime-benchmarks")] From 58b781a796e9e48b35f1ab8600500dbeee63922d Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 28 Sep 2023 16:14:02 +0200 Subject: [PATCH 059/143] feat: allow explicit unpaid execution from parent --- runtime/opal/src/xcm_barrier.rs | 17 ++++++++++++++--- runtime/quartz/src/xcm_barrier.rs | 7 ++++++- runtime/unique/src/xcm_barrier.rs | 7 ++++++- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/runtime/opal/src/xcm_barrier.rs b/runtime/opal/src/xcm_barrier.rs index a9e9a6d450..44f664b78a 100644 --- a/runtime/opal/src/xcm_barrier.rs +++ b/runtime/opal/src/xcm_barrier.rs @@ -14,7 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_support::traits::Everything; -use xcm_builder::{AllowTopLevelPaidExecutionFrom, TakeWeightCredit}; +use frame_support::{match_types, traits::Everything}; +use xcm::latest::{Junctions::*, MultiLocation}; +use xcm_builder::{AllowTopLevelPaidExecutionFrom, TakeWeightCredit, AllowExplicitUnpaidExecutionFrom}; -pub type Barrier = (TakeWeightCredit, AllowTopLevelPaidExecutionFrom); +match_types! { + pub type ParentOnly: impl Contains = { + MultiLocation { parents: 1, interior: Here } + }; +} + +pub type Barrier = ( + TakeWeightCredit, + AllowExplicitUnpaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, +); diff --git a/runtime/quartz/src/xcm_barrier.rs b/runtime/quartz/src/xcm_barrier.rs index 9aa370399b..a774924e30 100644 --- a/runtime/quartz/src/xcm_barrier.rs +++ b/runtime/quartz/src/xcm_barrier.rs @@ -18,12 +18,16 @@ use frame_support::{match_types, traits::Everything}; use xcm::latest::{Junctions::*, MultiLocation}; use xcm_builder::{ AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, }; use crate::PolkadotXcm; match_types! { + pub type ParentOnly: impl Contains = { + MultiLocation { parents: 1, interior: Here } + }; + pub type ParentOrSiblings: impl Contains = { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(_) } @@ -32,6 +36,7 @@ match_types! { pub type Barrier = ( TakeWeightCredit, + AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, // Expected responses are OK. AllowKnownQueryResponses, diff --git a/runtime/unique/src/xcm_barrier.rs b/runtime/unique/src/xcm_barrier.rs index 9aa370399b..a774924e30 100644 --- a/runtime/unique/src/xcm_barrier.rs +++ b/runtime/unique/src/xcm_barrier.rs @@ -18,12 +18,16 @@ use frame_support::{match_types, traits::Everything}; use xcm::latest::{Junctions::*, MultiLocation}; use xcm_builder::{ AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, }; use crate::PolkadotXcm; match_types! 
{ + pub type ParentOnly: impl Contains = { + MultiLocation { parents: 1, interior: Here } + }; + pub type ParentOrSiblings: impl Contains = { MultiLocation { parents: 1, interior: Here } | MultiLocation { parents: 1, interior: X1(_) } @@ -32,6 +36,7 @@ match_types! { pub type Barrier = ( TakeWeightCredit, + AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, // Expected responses are OK. AllowKnownQueryResponses, From f61ec5aa50bb4fd53b862c9f3caa2bbf0d642d97 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 28 Sep 2023 16:17:14 +0200 Subject: [PATCH 060/143] feat: add dmpQueue event section --- tests/src/util/playgrounds/unique.dev.ts | 41 ++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 499d2e8935..10569d9658 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -9,7 +9,7 @@ import * as defs from '../../interfaces/definitions'; import {IKeyringPair} from '@polkadot/types/types'; import {EventRecord} from '@polkadot/types/interfaces'; import {ICrossAccountId, ILogger, IPovInfo, ISchedulerOptions, ITransactionResult, TSigner} from './types'; -import {FrameSystemEventRecord, XcmV2TraitsError} from '@polkadot/types/lookup'; +import {FrameSystemEventRecord, XcmV2TraitsError, XcmV3TraitsOutcome} from '@polkadot/types/lookup'; import {SignerOptions, VoidFn} from '@polkadot/api/types'; import {Pallets} from '..'; import {spawnSync} from 'child_process'; @@ -260,6 +260,12 @@ export class Event { outcome: eventData(data, 1), })); }; + + static DmpQueue = class extends EventSection('dmpQueue') { + static ExecutedDownward = this.Method('ExecutedDownward', data => ({ + outcome: eventData(data, 1), + })); + }; } // eslint-disable-next-line @typescript-eslint/naming-convention @@ -559,6 +565,12 @@ export class DevRelayHelper extends RelayHelper { super(logger, options); this.wait = new WaitGroup(this); } + + getSudo() { + // eslint-disable-next-line @typescript-eslint/naming-convention + const SudoHelperType = SudoHelper(this.helperBase); + return this.clone(SudoHelperType) as DevRelayHelper; + } } export class DevWestmintHelper extends WestmintHelper { @@ -968,6 +980,31 @@ export class ArrangeGroup { ], }; } + + makeTransactProgram(info: {weightMultiplier: number, call: string}) { + return { + V3: [ + { + UnpaidExecution: { + weightLimit: 'Unlimited', + checkOrigin: null, + }, + }, + { + Transact: { + originKind: 'Superuser', + requireWeightAtMost: { + refTime: info.weightMultiplier * 200000000, + proofSize: info.weightMultiplier * 3000, + }, + call: { + encoded: info.call, + }, + }, + }, + ], + }; + } } class MoonbeamAccountGroup { @@ -1501,4 +1538,4 @@ function ScheduledUniqueHelper(Base: T) { ); } }; -} \ No newline at end of file +} From 0f67e15c3f4a9733871fe4b6b47599fef37530fa Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 28 Sep 2023 20:40:20 +0200 Subject: [PATCH 061/143] test: relay does root ops --- tests/src/xcm/lowLevelXcmQuartz.test.ts | 66 ++++++++++- tests/src/xcm/lowLevelXcmUnique.test.ts | 68 +++++++++++- tests/src/xcm/xcm.types.ts | 139 +++++++++++++++++++++++- 3 files changed, 268 insertions(+), 5 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 41a1258062..1c1e52ad22 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -15,7 +15,7 @@ // along with Unique Network. 
If not, see . import {IKeyringPair} from '@polkadot/types/types'; -import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingMoonriverPlaygrounds, usingShidenPlaygrounds} from '../util'; +import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingMoonriverPlaygrounds, usingShidenPlaygrounds, usingRelayPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; import {STATEMINE_CHAIN, QUARTZ_CHAIN, KARURA_CHAIN, MOONRIVER_CHAIN, SHIDEN_CHAIN, STATEMINE_DECIMALS, KARURA_DECIMALS, QTZ_DECIMALS, RELAY_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, relayUrl, shidenUrl, statemineUrl, SAFE_XCM_VERSION, XcmTestHelper, TRANSFER_AMOUNT} from './xcm.types'; @@ -379,3 +379,67 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { await testHelper.reserveTransferUNQfrom('shiden', alice); }); }); + +describeXCM('[XCMLL] Integration test: The relay can do some root ops', () => { + let sudoer: IKeyringPair; + + before(async function () { + await usingRelayPlaygrounds(relayUrl, async (_, privateKey) => { + sudoer = await privateKey('//Alice'); + }); + }); + + // At the moment there is no reliable way + // to establish the correspondence between the `ExecutedDownward` event + // and the relay's sent message due to `SetTopic` instruction + // containing an unpredictable topic silently added by the relay on the router level. + // This changes the message hash on arrival to our chain. + // + // See: + // * The relay's router: https://github.com/paritytech/polkadot-sdk/blob/f60318f68687e601c47de5ad5ca88e2c3f8139a7/polkadot/runtime/westend/src/xcm_config.rs#L83 + // * The `WithUniqueTopic` helper: https://github.com/paritytech/polkadot-sdk/blob/945ebbbcf66646be13d5b1d1bc26c8b0d3296d9e/polkadot/xcm/xcm-builder/src/routing.rs#L36 + // + // Because of this, we insert time gaps between tests so + // different `ExecutedDownward` events won't interfere with each other. 
+ afterEach(async () => { + await usingPlaygrounds(async (helper) => { + await helper.wait.newBlocks(3); + }); + }); + + itSub('The relay can set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'plain'); + }); + + itSub('The relay can batch set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'batch'); + }); + + itSub('The relay can batchAll set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'batchAll'); + }); + + itSub('The relay can forceBatch set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'forceBatch'); + }); + + itSub('[negative] The relay cannot set balance', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'plain'); + }); + + itSub('[negative] The relay cannot set balance via batch', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'batch'); + }); + + itSub('[negative] The relay cannot set balance via batchAll', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'batchAll'); + }); + + itSub('[negative] The relay cannot set balance via forceBatch', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'forceBatch'); + }); + + itSub('[negative] The relay cannot set balance via dispatchAs', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'dispatchAs'); + }); +}); diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index dadc44364d..ad69cbf828 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -16,11 +16,11 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; -import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds} from '../util'; +import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds, usingRelayPlaygrounds, requirePalletsOrSkip, Pallets} from '../util'; import {Event} from '../util/playgrounds/unique.dev'; import {nToBigInt} from '@polkadot/util'; import {hexToString} from '@polkadot/util'; -import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, XcmTestHelper, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; +import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, XcmTestHelper, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, relayUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; @@ -672,3 +672,67 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { await genericReserveTransferUNQfrom('astar', alice); }); }); + +describeXCM('[XCMLL] Integration test: The relay can do some root ops', () => { + let sudoer: IKeyringPair; + + before(async function () { + await usingRelayPlaygrounds(relayUrl, async (_, privateKey) => { + sudoer = await privateKey('//Alice'); + }); + }); + + // At the moment there is no reliable way + // to establish the correspondence between the `ExecutedDownward` 
event + // and the relay's sent message due to `SetTopic` instruction + // containing an unpredictable topic silently added by the relay on the router level. + // This changes the message hash on arrival to our chain. + // + // See: + // * The relay's router: https://github.com/paritytech/polkadot-sdk/blob/f60318f68687e601c47de5ad5ca88e2c3f8139a7/polkadot/runtime/westend/src/xcm_config.rs#L83 + // * The `WithUniqueTopic` helper: https://github.com/paritytech/polkadot-sdk/blob/945ebbbcf66646be13d5b1d1bc26c8b0d3296d9e/polkadot/xcm/xcm-builder/src/routing.rs#L36 + // + // Because of this, we insert time gaps between tests so + // different `ExecutedDownward` events won't interfere with each other. + afterEach(async () => { + await usingPlaygrounds(async (helper) => { + await helper.wait.newBlocks(3); + }); + }); + + itSub('The relay can set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'plain'); + }); + + itSub('The relay can batch set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'batch'); + }); + + itSub('The relay can batchAll set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'batchAll'); + }); + + itSub('The relay can forceBatch set storage', async () => { + await testHelper.relayIsPermittedToSetStorage(sudoer, 'forceBatch'); + }); + + itSub('[negative] The relay cannot set balance', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'plain'); + }); + + itSub('[negative] The relay cannot set balance via batch', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'batch'); + }); + + itSub('[negative] The relay cannot set balance via batchAll', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'batchAll'); + }); + + itSub('[negative] The relay cannot set balance via forceBatch', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'forceBatch'); + }); + + itSub('[negative] The relay cannot set balance via dispatchAs', async () => { + await testHelper.relayIsNotPermittedToSetBalance(sudoer, 'dispatchAs'); + }); +}); diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index cf6e6fd10c..5e6bd5bebb 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -1,7 +1,9 @@ import {IKeyringPair} from '@polkadot/types/types'; -import {expect, usingAcalaPlaygrounds, usingAstarPlaygrounds, usingKaruraPlaygrounds, usingMoonbeamPlaygrounds, usingMoonriverPlaygrounds, usingPlaygrounds, usingPolkadexPlaygrounds, usingShidenPlaygrounds} from '../util'; +import {hexToString} from '@polkadot/util'; +import {expect, usingAcalaPlaygrounds, usingAstarPlaygrounds, usingKaruraPlaygrounds, usingMoonbeamPlaygrounds, usingMoonriverPlaygrounds, usingPlaygrounds, usingPolkadexPlaygrounds, usingRelayPlaygrounds, usingShidenPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; import config from '../config'; +import {blake2AsHex} from '@polkadot/util-crypto'; export const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); export const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); @@ -68,6 +70,16 @@ export const expectUntrustedReserveLocationFail = async (helper: DevUniqueHelper && event.outcome.isUntrustedReserveLocation); }; +export const expectDownwardXcmNoPermission = async (helper: DevUniqueHelper) => { + // The correct messageHash for downward messages can't be reliably obtained + await helper.wait.expectEvent(maxWaitBlocks, 
Event.DmpQueue.ExecutedDownward, event => event.outcome.asIncomplete[1].isNoPermission); +}; + +export const expectDownwardXcmComplete = async (helper: DevUniqueHelper) => { + // The correct messageHash for downward messages can't be reliably obtained + await helper.wait.expectEvent(maxWaitBlocks, Event.DmpQueue.ExecutedDownward, event => event.outcome.isComplete); +}; + export const NETWORKS = { acala: usingAcalaPlaygrounds, astar: usingAstarPlaygrounds, @@ -161,6 +173,17 @@ export class XcmTestHelper { } } + uniqueChainMultilocationForRelay() { + return { + V3: { + parents: 0, + interior: { + X1: {Parachain: this._getNativeId()}, + }, + }, + }; + } + async sendUnqTo( networkName: keyof typeof NETWORKS, randomAccount: IKeyringPair, @@ -470,4 +493,116 @@ export class XcmTestHelper { }); } -} \ No newline at end of file + private async _relayXcmTransactSetStorage(variant: 'plain' | 'batch' | 'batchAll' | 'forceBatch') { + // eslint-disable-next-line require-await + return await usingPlaygrounds(async (helper) => { + const relayForceKV = () => { + const random = Math.random(); + const key = `relay-forced-key (instance: ${random})`; + const val = `relay-forced-value (instance: ${random})`; + const call = helper.constructApiCall('api.tx.system.setStorage', [[[key, val]]]).method.toHex(); + + return { + call, + key, + val, + }; + }; + + if(variant == 'plain') { + const kv = relayForceKV(); + return { + program: helper.arrange.makeTransactProgram({ + weightMultiplier: 1, + call: kv.call, + }), + kvs: [kv], + }; + } else { + const kv0 = relayForceKV(); + const kv1 = relayForceKV(); + + const batchCall = helper.constructApiCall(`api.tx.utility.${variant}`, [[kv0.call, kv1.call]]).method.toHex(); + return { + program: helper.arrange.makeTransactProgram({ + weightMultiplier: 2, + call: batchCall, + }), + kvs: [kv0, kv1], + }; + } + }); + } + + async relayIsPermittedToSetStorage(relaySudoer: IKeyringPair, variant: 'plain' | 'batch' | 'batchAll' | 'forceBatch') { + const {program, kvs} = await this._relayXcmTransactSetStorage(variant); + + await usingRelayPlaygrounds(relayUrl, async (helper) => { + await helper.getSudo().executeExtrinsic(relaySudoer, 'api.tx.xcmPallet.send', [ + this.uniqueChainMultilocationForRelay(), + program, + ]); + }); + + await usingPlaygrounds(async (helper) => { + await expectDownwardXcmComplete(helper); + + for(const kv of kvs) { + const forcedValue = await helper.callRpc('api.rpc.state.getStorage', [kv.key]); + expect(hexToString(forcedValue.toHex())).to.be.equal(kv.val); + } + }); + } + + private async _relayXcmTransactSetBalance(variant: 'plain' | 'batch' | 'batchAll' | 'forceBatch' | 'dispatchAs') { + // eslint-disable-next-line require-await + return await usingPlaygrounds(async (helper) => { + const emptyAccount = helper.arrange.createEmptyAccount().address; + + const forceSetBalanceCall = helper.constructApiCall('api.tx.balances.forceSetBalance', [emptyAccount, 10_000n]).method.toHex(); + + let call; + + if(variant == 'plain') { + call = forceSetBalanceCall; + + } else if(variant == 'dispatchAs') { + call = helper.constructApiCall('api.tx.utility.dispatchAs', [ + { + system: 'Root', + }, + forceSetBalanceCall, + ]).method.toHex(); + } else { + call = helper.constructApiCall(`api.tx.utility.${variant}`, [[forceSetBalanceCall]]).method.toHex(); + } + + return { + program: helper.arrange.makeTransactProgram({ + weightMultiplier: 1, + call, + }), + emptyAccount, + }; + }); + } + + async relayIsNotPermittedToSetBalance( + relaySudoer: IKeyringPair, + variant: 'plain' | 
'batch' | 'batchAll' | 'forceBatch' | 'dispatchAs', + ) { + const {program, emptyAccount} = await this._relayXcmTransactSetBalance(variant); + + await usingRelayPlaygrounds(relayUrl, async (helper) => { + await helper.getSudo().executeExtrinsic(relaySudoer, 'api.tx.xcmPallet.send', [ + this.uniqueChainMultilocationForRelay(), + program, + ]); + }); + + await usingPlaygrounds(async (helper) => { + await expectDownwardXcmNoPermission(helper); + expect(await helper.balance.getSubstrate(emptyAccount)).to.be.equal(0n); + }); + } +} From 06b395f7b928836522c61c78d7a43c2af7d2842a Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 28 Sep 2023 20:45:03 +0200 Subject: [PATCH 062/143] fix: allow xcm transact for gov bodies when they exist --- runtime/common/config/xcm/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/runtime/common/config/xcm/mod.rs b/runtime/common/config/xcm/mod.rs index a825d729ab..d27794ba0d 100644 --- a/runtime/common/config/xcm/mod.rs +++ b/runtime/common/config/xcm/mod.rs @@ -166,8 +166,10 @@ pub struct XcmCallFilter; impl XcmCallFilter { fn allow_gov_and_sys_call(call: &RuntimeCall) -> bool { match call { - RuntimeCall::System(..) - | RuntimeCall::Identity(..) + RuntimeCall::System(..) => true, + + #[cfg(feature = "governance")] + RuntimeCall::Identity(..) | RuntimeCall::Preimage(..) | RuntimeCall::Democracy(..) | RuntimeCall::Council(..) From 668280b1206de9640cc6971283f55bc921a1854d Mon Sep 17 00:00:00 2001 From: PraetorP Date: Fri, 29 Sep 2023 11:32:27 +0000 Subject: [PATCH 063/143] feat(xcm tests): `XcmHelper` integraion for QTZ\UNQ --- .baedeker/.gitignore | 3 +- .github/workflows/xcm.yml | 2 +- tests/src/xcm/lowLevelXcmQuartz.test.ts | 214 +++++---------- tests/src/xcm/lowLevelXcmUnique.test.ts | 335 +----------------------- tests/src/xcm/xcm.types.ts | 47 ++-- tests/src/xcm/xcmQuartz.test.ts | 102 ++++---- 6 files changed, 173 insertions(+), 530 deletions(-) diff --git a/.baedeker/.gitignore b/.baedeker/.gitignore index fb2b1c2276..a0d706661e 100644 --- a/.baedeker/.gitignore +++ b/.baedeker/.gitignore @@ -1,4 +1,5 @@ /.bdk-env -/rewrites.jsonnet +/rewrites*.jsonnet /vendor /baedeker-library +!/rewrites.example.jsonnet \ No newline at end of file diff --git a/.github/workflows/xcm.yml b/.github/workflows/xcm.yml index a70bdbd155..b9598a8d02 100644 --- a/.github/workflows/xcm.yml +++ b/.github/workflows/xcm.yml @@ -38,7 +38,7 @@ jobs: with: matrix: | network {opal}, relay_branch {${{ env.UNIQUEWEST_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.WESTMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmOpal}, runtime_features {opal-runtime} - network {quartz}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, acala_version {${{ env.KARURA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONRIVER_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINE_BUILD_BRANCH }}}, astar_version {${{ env.SHIDEN_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testXcmQuartz}, runtime_features {quartz-runtime} + network {quartz}, relay_branch {${{ env.KUSAMA_MAINNET_BRANCH }}}, acala_version {${{ env.KARURA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONRIVER_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINE_BUILD_BRANCH }}}, astar_version {${{ env.SHIDEN_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest 
{testFullXcmQuartz}, runtime_features {quartz-runtime} network {unique}, relay_branch {${{ env.POLKADOT_MAINNET_BRANCH }}}, acala_version {${{ env.ACALA_BUILD_BRANCH }}}, moonbeam_version {${{ env.MOONBEAM_BUILD_BRANCH }}}, cumulus_version {${{ env.STATEMINT_BUILD_BRANCH }}}, astar_version {${{ env.ASTAR_BUILD_BRANCH }}}, polkadex_version {${{ env.POLKADEX_BUILD_BRANCH }}}, runtest {testFullXcmUnique}, runtime_features {unique-runtime} xcm: diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 1c1e52ad22..0bee550650 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -15,10 +15,9 @@ // along with Unique Network. If not, see . import {IKeyringPair} from '@polkadot/types/types'; -import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingMoonriverPlaygrounds, usingShidenPlaygrounds, usingRelayPlaygrounds} from '../util'; -import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; -import {STATEMINE_CHAIN, QUARTZ_CHAIN, KARURA_CHAIN, MOONRIVER_CHAIN, SHIDEN_CHAIN, STATEMINE_DECIMALS, KARURA_DECIMALS, QTZ_DECIMALS, RELAY_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, relayUrl, shidenUrl, statemineUrl, SAFE_XCM_VERSION, XcmTestHelper, TRANSFER_AMOUNT} from './xcm.types'; - +import {itSub, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingMoonriverPlaygrounds, usingShidenPlaygrounds, usingRelayPlaygrounds} from '../util'; +import {QUARTZ_CHAIN, QTZ_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, shidenUrl, SAFE_XCM_VERSION, XcmTestHelper, TRANSFER_AMOUNT, SENDER_BUDGET, relayUrl} from './xcm.types'; +import {hexToString} from '@polkadot/util'; const testHelper = new XcmTestHelper('quartz'); @@ -26,24 +25,6 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Karura', () => { let alice: IKeyringPair; let randomAccount: IKeyringPair; - let balanceQuartzTokenInit: bigint; - let balanceQuartzTokenMiddle: bigint; - let balanceQuartzTokenFinal: bigint; - let balanceKaruraTokenInit: bigint; - let balanceKaruraTokenMiddle: bigint; - let balanceKaruraTokenFinal: bigint; - let balanceQuartzForeignTokenInit: bigint; - let balanceQuartzForeignTokenMiddle: bigint; - let balanceQuartzForeignTokenFinal: bigint; - - // computed by a test transfer from prod Quartz to prod Karura. 
- // 2 QTZ sent https://quartz.subscan.io/xcm_message/kusama-f60d821b049f8835a3005ce7102285006f5b61e9 - // 1.919176000000000000 QTZ received (you can check Karura's chain state in the corresponding block) - const expectedKaruraIncomeFee = 2000000000000000000n - 1919176000000000000n; - const karuraEps = 8n * 10n ** 16n; - - let karuraBackwardTransferAmount: bigint; - before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); @@ -72,15 +53,19 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Karura', () => { minimalBalance: 1000000000000000000n, }; - await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + const assets = (await (helper.callRpc('api.query.assetRegistry.assetMetadatas.entries'))).map(([_k, v]: [any, any]) => + hexToString(v.toJSON()['symbol'])) as string[]; + + if(!assets.includes('QTZ')) { + await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + } else { + console.log('QTZ token already registered on Karura assetRegistry pallet'); + } await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); - balanceKaruraTokenInit = await helper.balance.getSubstrate(randomAccount.address); - balanceQuartzForeignTokenInit = await helper.tokens.accounts(randomAccount.address, {ForeignAsset: 0}); }); await usingPlaygrounds(async (helper) => { - await helper.balance.transferToSubstrate(alice, randomAccount.address, 10n * TRANSFER_AMOUNT); - balanceQuartzTokenInit = await helper.balance.getSubstrate(randomAccount.address); + await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); }); }); @@ -100,67 +85,17 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Karura', () => { // the the corresponding foreign assets are not registered describeXCM('[XCMLL] Integration test: Quartz rejects non-native tokens', () => { let alice: IKeyringPair; - let alith: IKeyringPair; - - const testAmount = 100_000_000_000n; - let quartzParachainJunction; - let quartzAccountJunction; - let quartzParachainMultilocation: any; - let quartzAccountMultilocation: any; - let quartzCombinedMultilocation: any; - - let messageSent: any; - - const maxWaitBlocks = 3; before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); - quartzParachainJunction = {Parachain: QUARTZ_CHAIN}; - quartzAccountJunction = { - AccountId32: { - network: 'Any', - id: alice.addressRaw, - }, - }; - - quartzParachainMultilocation = { - V2: { - parents: 1, - interior: { - X1: quartzParachainJunction, - }, - }, - }; - - quartzAccountMultilocation = { - V2: { - parents: 0, - interior: { - X1: quartzAccountJunction, - }, - }, - }; - quartzCombinedMultilocation = { - V2: { - parents: 1, - interior: { - X2: [quartzParachainJunction, quartzAccountJunction], - }, - }, - }; // Set the default version to wrap the first message to other chains. 
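      // ("Safe XCM version" here is the version used to encode outgoing messages
      // while the destination's supported XCM version is not yet known.
      // A sketch of the extrinsic this helper presumably wraps, submitted via
      // sudo as elsewhere in this suite:
      //
      //   api.tx.polkadotXcm.forceDefaultXcmVersion(SAFE_XCM_VERSION)
      // )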
await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); }); - - // eslint-disable-next-line require-await - await usingMoonriverPlaygrounds(moonriverUrl, async (helper) => { - alith = helper.account.alithAccount(); - }); }); itSub('Quartz rejects KAR tokens from Karura', async () => { @@ -195,22 +130,12 @@ describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { minimalBalance: 1n, }; - let balanceQuartzTokenInit: bigint; - let balanceQuartzTokenMiddle: bigint; - let balanceQuartzTokenFinal: bigint; - let balanceForeignQtzTokenInit: bigint; - let balanceForeignQtzTokenMiddle: bigint; - let balanceForeignQtzTokenFinal: bigint; - let balanceMovrTokenInit: bigint; - let balanceMovrTokenMiddle: bigint; - let balanceMovrTokenFinal: bigint; before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); [randomAccountQuartz] = await helper.arrange.createAccounts([0n], alice); - balanceForeignQtzTokenInit = 0n; // Set the default version to wrap the first message to other chains. await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); @@ -239,20 +164,22 @@ describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { const isSufficient = true; const unitsPerSecond = 1n; const numAssetsWeightHint = 0; - - const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ - location: quartzAssetLocation, - metadata: quartzAssetMetadata, - existentialDeposit, - isSufficient, - unitsPerSecond, - numAssetsWeightHint, - }); - - console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); - - await helper.fastDemocracy.executeProposal('register QTZ foreign asset', encodedProposal); - + if((await helper.assetManager.assetTypeId(quartzAssetLocation)).toJSON()) { + console.log('Quartz asset already registered on Moonriver'); + } else { + const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ + location: quartzAssetLocation, + metadata: quartzAssetMetadata, + existentialDeposit, + isSufficient, + unitsPerSecond, + numAssetsWeightHint, + }); + + console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); + + await helper.fastDemocracy.executeProposal('register QTZ foreign asset', encodedProposal); + } // >>> Acquire Quartz AssetId Info on Moonriver >>> console.log('Acquire Quartz AssetId Info on Moonriver.......'); @@ -267,13 +194,10 @@ describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { await helper.balance.transferToEthereum(baltatharAccount, randomAccountMoonriver.address, 11_000_000_000_000_000_000n); console.log('Sponsoring random Account.......DONE'); // <<< Sponsoring random Account <<< - - balanceMovrTokenInit = await helper.balance.getEthereum(randomAccountMoonriver.address); }); await usingPlaygrounds(async (helper) => { await helper.balance.transferToSubstrate(alice, randomAccountQuartz.address, 10n * TRANSFER_AMOUNT); - balanceQuartzTokenInit = await helper.balance.getSubstrate(randomAccountQuartz.address); }); }); @@ -296,7 +220,7 @@ describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { let alice: IKeyringPair; - let sender: IKeyringPair; + let randomAccount: IKeyringPair; const QTZ_ASSET_ID_ON_SHIDEN = 1; const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; @@ -304,71 +228,69 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens 
with Shiden', () => { // Quartz -> Shiden const shidenInitialBalance = 1n * (10n ** SHIDEN_DECIMALS); // 1 SHD, existential deposit required to actually create the account on Shiden const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? - const qtzToShidenTransferred = 10n * (10n ** QTZ_DECIMALS); // 10 QTZ - const qtzToShidenArrived = 9_999_999_999_088_000_000n; // 9.999 ... QTZ, Shiden takes a commision in foreign tokens - // Shiden -> Quartz - const qtzFromShidenTransfered = 5n * (10n ** QTZ_DECIMALS); // 5 QTZ - const qtzOnShidenLeft = qtzToShidenArrived - qtzFromShidenTransfered; // 4.999_999_999_088_000_000n QTZ - let balanceAfterQuartzToShidenXCM: bigint; before(async () => { await usingPlaygrounds(async (helper, privateKey) => { alice = await privateKey('//Alice'); - [sender] = await helper.arrange.createAccounts([100n], alice); - console.log('sender', sender.address); + randomAccount = helper.arrange.createEmptyAccount(); + await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); + console.log('sender: ', randomAccount.address); // Set the default version to wrap the first message to other chains. await helper.getSudo().xcm.setSafeXcmVersion(alice, SAFE_XCM_VERSION); }); await usingShidenPlaygrounds(shidenUrl, async (helper) => { - console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production - await helper.assets.create( - alice, - QTZ_ASSET_ID_ON_SHIDEN, - alice.address, - QTZ_MINIMAL_BALANCE_ON_SHIDEN, - ); - - await helper.assets.setMetadata( - alice, - QTZ_ASSET_ID_ON_SHIDEN, - 'Cross chain QTZ', - 'xcQTZ', - Number(QTZ_DECIMALS), - ); - - console.log('2. Register asset location on Shiden'); - const assetLocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: QUARTZ_CHAIN, + if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { + console.log('1. Create foreign asset and metadata'); + // TODO update metadata with values from production + await helper.assets.create( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + alice.address, + QTZ_MINIMAL_BALANCE_ON_SHIDEN, + ); + + await helper.assets.setMetadata( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + 'Cross chain QTZ', + 'xcQTZ', + Number(QTZ_DECIMALS), + ); + + console.log('2. Register asset location on Shiden'); + const assetLocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: QUARTZ_CHAIN, + }, }, }, - }, - }; - - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, QTZ_ASSET_ID_ON_SHIDEN]); + }; - console.log('3. Set QTZ payment for XCM execution on Shiden'); - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, QTZ_ASSET_ID_ON_SHIDEN]); + console.log('3. Set QTZ payment for XCM execution on Shiden'); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } else { + console.log('QTZ is already registered on Shiden'); + } console.log('4. 
Transfer 1 SDN to recipient to create the account (needed due to existential balance)'); - await helper.balance.transferToSubstrate(alice, sender.address, shidenInitialBalance); + await helper.balance.transferToSubstrate(alice, randomAccount.address, shidenInitialBalance); }); }); itSub('Should connect and send QTZ to Shiden', async () => { - await testHelper.sendUnqTo('shiden', sender); + await testHelper.sendUnqTo('shiden', randomAccount); }); itSub('Should connect to Shiden and send QTZ back', async () => { - await testHelper.sendUnqBack('shiden', alice, sender); + await testHelper.sendUnqBack('shiden', alice, randomAccount); }); itSub('Shiden can send only up to its balance', async () => { diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index ad69cbf828..1cbafd4639 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -16,318 +16,14 @@ import {IKeyringPair} from '@polkadot/types/types'; import config from '../config'; -import {itSub, expect, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds, usingRelayPlaygrounds, requirePalletsOrSkip, Pallets} from '../util'; -import {Event} from '../util/playgrounds/unique.dev'; +import {itSub, describeXCM, usingPlaygrounds, usingAcalaPlaygrounds, usingMoonbeamPlaygrounds, usingAstarPlaygrounds, usingPolkadexPlaygrounds, usingRelayPlaygrounds} from '../util'; import {nToBigInt} from '@polkadot/util'; import {hexToString} from '@polkadot/util'; -import {ASTAR_DECIMALS, NETWORKS, SAFE_XCM_VERSION, UNIQUE_CHAIN, UNQ_DECIMALS, XcmTestHelper, acalaUrl, astarUrl, expectFailedToTransact, expectUntrustedReserveLocationFail, getDevPlayground, mapToChainId, mapToChainUrl, maxWaitBlocks, moonbeamUrl, polkadexUrl, relayUrl, uniqueAssetId, uniqueVersionedMultilocation} from './xcm.types'; - - -const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; -const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; -const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; -const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; -const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; - -let balanceUniqueTokenInit: bigint; -let balanceUniqueTokenMiddle: bigint; -let balanceUniqueTokenFinal: bigint; -let unqFees: bigint; +import {ASTAR_DECIMALS, SAFE_XCM_VERSION, SENDER_BUDGET, UNIQUE_CHAIN, UNQ_DECIMALS, XcmTestHelper, acalaUrl, astarUrl, moonbeamUrl, polkadexUrl, relayUrl, uniqueAssetId} from './xcm.types'; const testHelper = new XcmTestHelper('unique'); -async function genericSendUnqTo( - networkName: keyof typeof NETWORKS, - randomAccount: IKeyringPair, - randomAccountOnTargetChain = randomAccount, -) { - const networkUrl = mapToChainUrl(networkName); - const targetPlayground = getDevPlayground(networkName); - await usingPlaygrounds(async (helper) => { - balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); - const destination = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: mapToChainId(networkName), - }, - }, - }, - }; - - const beneficiary = { - V2: { - parents: 0, - interior: { - X1: ( - networkName == 'moonbeam' ? 
- { - AccountKey20: { - network: 'Any', - key: randomAccountOnTargetChain.address, - }, - } - : - { - AccountId32: { - network: 'Any', - id: randomAccountOnTargetChain.addressRaw, - }, - } - ), - }, - }, - }; - - const assets = { - V2: [ - { - id: { - Concrete: { - parents: 0, - interior: 'Here', - }, - }, - fun: { - Fungible: TRANSFER_AMOUNT, - }, - }, - ], - }; - const feeAssetItem = 0; - - await helper.xcm.limitedReserveTransferAssets(randomAccount, destination, beneficiary, assets, feeAssetItem, 'Unlimited'); - const messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - balanceUniqueTokenMiddle = await helper.balance.getSubstrate(randomAccount.address); - - unqFees = balanceUniqueTokenInit - balanceUniqueTokenMiddle - TRANSFER_AMOUNT; - console.log('[Unique -> %s] transaction fees on Unique: %s UNQ', networkName, helper.util.bigIntToDecimals(unqFees)); - expect(unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; - - await targetPlayground(networkUrl, async (helper) => { - /* - Since only the parachain part of the Polkadex - infrastructure is launched (without their - solochain validators), processing incoming - assets will lead to an error. - This error indicates that the Polkadex chain - received a message from the Unique network, - since the hash is being checked to ensure - it matches what was sent. - */ - if(networkName == 'polkadex') { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Fail, event => event.messageHash == messageSent.messageHash); - } else { - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == messageSent.messageHash); - } - }); - - }); -} - -async function genericSendUnqBack( - networkName: keyof typeof NETWORKS, - sudoer: IKeyringPair, - randomAccountOnUnq: IKeyringPair, -) { - const networkUrl = mapToChainUrl(networkName); - - const targetPlayground = getDevPlayground(networkName); - await usingPlaygrounds(async (helper) => { - - const xcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( - randomAccountOnUnq.addressRaw, - uniqueAssetId, - SENDBACK_AMOUNT, - ); - - let xcmProgramSent: any; - - - await targetPlayground(networkUrl, async (helper) => { - if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, xcmProgram); - xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, xcmProgram]); - // Needed to bypass the call filter. 
- const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); - xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - }); - - await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.Success, event => event.messageHash == xcmProgramSent.messageHash); - - balanceUniqueTokenFinal = await helper.balance.getSubstrate(randomAccountOnUnq.address); - - expect(balanceUniqueTokenFinal).to.be.equal(balanceUniqueTokenInit - unqFees - STAYED_ON_TARGET_CHAIN); - - }); -} - -async function genericSendOnlyOwnedBalance( - networkName: keyof typeof NETWORKS, - sudoer: IKeyringPair, -) { - const networkUrl = mapToChainUrl(networkName); - const targetPlayground = getDevPlayground(networkName); - - const targetChainBalance = 10000n * (10n ** UNQ_DECIMALS); - - await usingPlaygrounds(async (helper) => { - const targetChainSovereignAccount = helper.address.paraSiblingSovereignAccount(mapToChainId(networkName)); - await helper.getSudo().balance.setBalanceSubstrate(sudoer, targetChainSovereignAccount, targetChainBalance); - const moreThanTargetChainHas = 2n * targetChainBalance; - - const targetAccount = helper.arrange.createEmptyAccount(); - const maliciousXcmProgram = helper.arrange.makeXcmProgramWithdrawDeposit( - targetAccount.addressRaw, - { - Concrete: { - parents: 0, - interior: 'Here', - }, - }, - moreThanTargetChainHas, - ); - - let maliciousXcmProgramSent: any; - - - await targetPlayground(networkUrl, async (helper) => { - if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgram); - maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgram]); - // Needed to bypass the call filter. 
- const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); - maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - }); - - await expectFailedToTransact(helper, maliciousXcmProgramSent); - - const targetAccountBalance = await helper.balance.getSubstrate(targetAccount.address); - expect(targetAccountBalance).to.be.equal(0n); - }); -} - -async function genericReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { - const networkUrl = mapToChainUrl(netwokrName); - const targetPlayground = getDevPlayground(netwokrName); - - await usingPlaygrounds(async (helper) => { - const testAmount = 10_000n * (10n ** UNQ_DECIMALS); - const targetAccount = helper.arrange.createEmptyAccount(); - - const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( - targetAccount.addressRaw, - uniqueAssetId, - testAmount, - ); - - const maliciousXcmProgramHereId = helper.arrange.makeXcmProgramReserveAssetDeposited( - targetAccount.addressRaw, - { - Concrete: { - parents: 0, - interior: 'Here', - }, - }, - testAmount, - ); - - let maliciousXcmProgramFullIdSent: any; - let maliciousXcmProgramHereIdSent: any; - const maxWaitBlocks = 3; - - // Try to trick Unique using full UNQ identification - await targetPlayground(networkUrl, async (helper) => { - if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramFullId); - maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - // Moonbeam case - else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); - // Needed to bypass the call filter. - const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using path asset identification`,batchCall); - - maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - }); - - - await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramFullIdSent); - - let accountBalance = await helper.balance.getSubstrate(targetAccount.address); - expect(accountBalance).to.be.equal(0n); - - // Try to trick Unique using shortened UNQ identification - await targetPlayground(networkUrl, async (helper) => { - if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramHereId); - maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramHereId]); - // Needed to bypass the call filter. 
- const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); - - maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - }); - - await expectUntrustedReserveLocationFail(helper, maliciousXcmProgramHereIdSent); - - accountBalance = await helper.balance.getSubstrate(targetAccount.address); - expect(accountBalance).to.be.equal(0n); - }); -} - -async function genericRejectNativeTokensFrom(networkName: keyof typeof NETWORKS, sudoerOnTargetChain: IKeyringPair) { - const networkUrl = mapToChainUrl(networkName); - const targetPlayground = getDevPlayground(networkName); - let messageSent: any; - - await usingPlaygrounds(async (helper) => { - const maliciousXcmProgramFullId = helper.arrange.makeXcmProgramReserveAssetDeposited( - helper.arrange.createEmptyAccount().addressRaw, - { - Concrete: { - parents: 1, - interior: { - X1: { - Parachain: mapToChainId(networkName), - }, - }, - }, - }, - TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT, - ); - await targetPlayground(networkUrl, async (helper) => { - if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueVersionedMultilocation, maliciousXcmProgramFullId); - messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); - // Needed to bypass the call filter. - const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${networkName} sending native tokens to the Unique via fast democracy`, batchCall); - - messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); - } - }); - await expectFailedToTransact(helper, messageSent); - }); -} describeXCM('[XCMLL] Integration test: Exchanging tokens with Acala', () => { @@ -375,24 +71,23 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Acala', () => { await usingPlaygrounds(async (helper) => { await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); - balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); }); }); itSub('Should connect and send UNQ to Acala', async () => { - await genericSendUnqTo('acala', randomAccount); + await testHelper.sendUnqTo('acala', randomAccount); }); itSub('Should connect to Acala and send UNQ back', async () => { - await genericSendUnqBack('acala', alice, randomAccount); + await testHelper.sendUnqBack('acala', alice, randomAccount); }); itSub('Acala can send only up to its balance', async () => { - await genericSendOnlyOwnedBalance('acala', alice); + await testHelper.sendOnlyOwnedBalance('acala', alice); }); itSub('Should not accept reserve transfer of UNQ from Acala', async () => { - await genericReserveTransferUNQfrom('acala', alice); + await testHelper.reserveTransferUNQfrom('acala', alice); }); }); @@ -430,7 +125,6 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Polkadex', () => { await usingPlaygrounds(async (helper) => { await helper.balance.transferToSubstrate(alice, randomAccount.address, SENDER_BUDGET); - balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccount.address); }); }); @@ -467,19 +161,19 @@ describeXCM('[XCMLL] 
Integration test: Unique rejects non-native tokens', () => }); itSub('Unique rejects ACA tokens from Acala', async () => { - await genericRejectNativeTokensFrom('acala', alice); + await testHelper.rejectNativeTokensFrom('acala', alice); }); itSub('Unique rejects GLMR tokens from Moonbeam', async () => { - await genericRejectNativeTokensFrom('moonbeam', alice); + await testHelper.rejectNativeTokensFrom('moonbeam', alice); }); itSub('Unique rejects ASTR tokens from Astar', async () => { - await genericRejectNativeTokensFrom('astar', alice); + await testHelper.rejectNativeTokensFrom('astar', alice); }); itSub('Unique rejects PDX tokens from Polkadex', async () => { - await genericRejectNativeTokensFrom('polkadex', alice); + await testHelper.rejectNativeTokensFrom('polkadex', alice); }); }); @@ -570,7 +264,6 @@ describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => { await usingPlaygrounds(async (helper) => { await helper.balance.transferToSubstrate(alice, randomAccountUnique.address, SENDER_BUDGET); - balanceUniqueTokenInit = await helper.balance.getSubstrate(randomAccountUnique.address); }); }); @@ -657,19 +350,19 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { }); itSub('Should connect and send UNQ to Astar', async () => { - await genericSendUnqTo('astar', randomAccount); + await testHelper.sendUnqTo('astar', randomAccount); }); itSub('Should connect to Astar and send UNQ back', async () => { - await genericSendUnqBack('astar', alice, randomAccount); + await testHelper.sendUnqBack('astar', alice, randomAccount); }); itSub('Astar can send only up to its balance', async () => { - await genericSendOnlyOwnedBalance('astar', alice); + await testHelper.sendOnlyOwnedBalance('astar', alice); }); itSub('Should not accept reserve transfer of UNQ from Astar', async () => { - await genericReserveTransferUNQfrom('astar', alice); + await testHelper.reserveTransferUNQfrom('astar', alice); }); }); diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index 5e6bd5bebb..630027b9ae 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -3,7 +3,6 @@ import {hexToString} from '@polkadot/util'; import {expect, usingAcalaPlaygrounds, usingAstarPlaygrounds, usingKaruraPlaygrounds, usingMoonbeamPlaygrounds, usingMoonriverPlaygrounds, usingPlaygrounds, usingPolkadexPlaygrounds, usingRelayPlaygrounds, usingShidenPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; import config from '../config'; -import {blake2AsHex} from '@polkadot/util-crypto'; export const UNIQUE_CHAIN = +(process.env.RELAY_UNIQUE_ID || 2037); export const STATEMINT_CHAIN = +(process.env.RELAY_STATEMINT_ID || 1000); @@ -136,10 +135,10 @@ export function getDevPlayground(name: NetworkNames) { } export const TRANSFER_AMOUNT = 2000000_000_000_000_000_000_000n; -const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; -const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; -const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; -const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; +export const SENDER_BUDGET = 2n * TRANSFER_AMOUNT; +export const SENDBACK_AMOUNT = TRANSFER_AMOUNT / 2n; +export const STAYED_ON_TARGET_CHAIN = TRANSFER_AMOUNT - SENDBACK_AMOUNT; +export const TARGET_CHAIN_TOKEN_TRANSFER_AMOUNT = 100_000_000_000n; export class XcmTestHelper { private _balanceUniqueTokenInit: bigint = 0n; @@ -163,6 +162,7 @@ export class XcmTestHelper { return UNIQUE_CHAIN; } } + private _isAddress20FormatFor(network: 
NetworkNames) { switch (network) { case 'moonbeam': @@ -173,6 +173,19 @@ export class XcmTestHelper { } } + private _runtimeVersionedMultilocation() { + return { + V3: { + parents: 1, + interior: { + X1: { + Parachain: this._getNativeId(), + }, + }, + }, + }; + } + uniqueChainMultilocationForRelay() { return { V3: { @@ -250,8 +263,8 @@ export class XcmTestHelper { this._balanceUniqueTokenMiddle = await helper.balance.getSubstrate(randomAccount.address); this._unqFees = this._balanceUniqueTokenInit - this._balanceUniqueTokenMiddle - TRANSFER_AMOUNT; - console.log('[Unique -> %s] transaction fees on Unique: %s UNQ', networkName, helper.util.bigIntToDecimals(this._unqFees)); - expect(this._unqFees > 0n, 'Negative fees UNQ, looks like nothing was transferred').to.be.true; + console.log('[%s -> %s] transaction fees: %s', this._nativeRuntime, networkName, helper.util.bigIntToDecimals(this._unqFees)); + expect(this._unqFees > 0n, 'Negative fees, looks like nothing was transferred').to.be.true; await targetPlayground(networkUrl, async (helper) => { /* @@ -302,10 +315,10 @@ export class XcmTestHelper { await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, xcmProgram); + await helper.getSudo().xcm.send(sudoer, this._runtimeVersionedMultilocation(), xcmProgram); xcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, xcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), xcmProgram]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); @@ -354,10 +367,10 @@ export class XcmTestHelper { await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgram); + await helper.getSudo().xcm.send(sudoer, this._runtimeVersionedMultilocation(), maliciousXcmProgram); maliciousXcmProgramSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgram]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgram]); // Needed to bypass the call filter. 
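        // (Concretely, the call submitted through fast democracy below is
        //   utility.batch([ polkadotXcm.send(this._runtimeVersionedMultilocation(), xcmProgram) ])
        // rather than a bare polkadotXcm.send, presumably because the destination
        // runtime's call filter rejects the latter when dispatched from a
        // democracy proposal.)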
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); await helper.fastDemocracy.executeProposal(`sending ${networkName} -> Unique via XCM program`, batchCall); @@ -413,12 +426,12 @@ export class XcmTestHelper { // Try to trick Unique using full UNQ identification await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramFullId); + await helper.getSudo().xcm.send(sudoer, this._runtimeVersionedMultilocation(), maliciousXcmProgramFullId); maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } // Moonbeam case else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgramFullId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using path asset identification`,batchCall); @@ -436,11 +449,11 @@ export class XcmTestHelper { // Try to trick Unique using shortened UNQ identification await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoer, uniqueVersionedMultilocation, maliciousXcmProgramHereId); + await helper.getSudo().xcm.send(sudoer, this._runtimeVersionedMultilocation(), maliciousXcmProgramHereId); maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramHereId]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgramHereId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); @@ -478,10 +491,10 @@ export class XcmTestHelper { ); await targetPlayground(networkUrl, async (helper) => { if('getSudo' in helper) { - await helper.getSudo().xcm.send(sudoerOnTargetChain, uniqueVersionedMultilocation, maliciousXcmProgramFullId); + await helper.getSudo().xcm.send(sudoerOnTargetChain, this._runtimeVersionedMultilocation(), maliciousXcmProgramFullId); messageSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } else if('fastDemocracy' in helper) { - const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [uniqueVersionedMultilocation, maliciousXcmProgramFullId]); + const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgramFullId]); // Needed to bypass the call filter. 
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); await helper.fastDemocracy.executeProposal(`${networkName} sending native tokens to the Unique via fast democracy`, batchCall); diff --git a/tests/src/xcm/xcmQuartz.test.ts b/tests/src/xcm/xcmQuartz.test.ts index fe7d1083b1..41b3c0aae6 100644 --- a/tests/src/xcm/xcmQuartz.test.ts +++ b/tests/src/xcm/xcmQuartz.test.ts @@ -18,6 +18,7 @@ import {IKeyringPair} from '@polkadot/types/types'; import {itSub, expect, describeXCM, usingPlaygrounds, usingKaruraPlaygrounds, usingRelayPlaygrounds, usingMoonriverPlaygrounds, usingStateminePlaygrounds, usingShidenPlaygrounds} from '../util'; import {DevUniqueHelper, Event} from '../util/playgrounds/unique.dev'; import {STATEMINE_CHAIN, QUARTZ_CHAIN, KARURA_CHAIN, MOONRIVER_CHAIN, SHIDEN_CHAIN, STATEMINE_DECIMALS, KARURA_DECIMALS, QTZ_DECIMALS, RELAY_DECIMALS, SHIDEN_DECIMALS, karuraUrl, moonriverUrl, relayUrl, shidenUrl, statemineUrl} from './xcm.types'; +import {hexToString} from '@polkadot/util'; @@ -483,7 +484,14 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Karura', () => { minimalBalance: 1000000000000000000n, }; - await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + const assets = (await (helper.callRpc('api.query.assetRegistry.assetMetadatas.entries'))).map(([_k, v]: [any, any]) => + hexToString(v.toJSON()['symbol'])) as string[]; + + if(!assets.includes('QTZ')) { + await helper.getSudo().assetRegistry.registerForeignAsset(alice, destination, metadata); + } else { + console.log('QTZ token already registered on Karura assetRegistry pallet'); + } await helper.balance.transferToSubstrate(alice, randomAccount.address, 10000000000000n); balanceKaruraTokenInit = await helper.balance.getSubstrate(randomAccount.address); balanceQuartzForeignTokenInit = await helper.tokens.accounts(randomAccount.address, {ForeignAsset: 0}); @@ -969,19 +977,22 @@ describeXCM('[XCM] Integration test: Exchanging QTZ with Moonriver', () => { const unitsPerSecond = 1n; const numAssetsWeightHint = 0; - const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ - location: quartzAssetLocation, - metadata: quartzAssetMetadata, - existentialDeposit, - isSufficient, - unitsPerSecond, - numAssetsWeightHint, - }); - - console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); - - await helper.fastDemocracy.executeProposal('register QTZ foreign asset', encodedProposal); - + if((await helper.assetManager.assetTypeId(quartzAssetLocation)).toJSON()) { + console.log('Quartz asset already registered on Moonriver'); + } else { + const encodedProposal = helper.assetManager.makeRegisterForeignAssetProposal({ + location: quartzAssetLocation, + metadata: quartzAssetMetadata, + existentialDeposit, + isSufficient, + unitsPerSecond, + numAssetsWeightHint, + }); + + console.log('Encoded proposal for registerForeignAsset & setAssetUnitsPerSecond is %s', encodedProposal); + + await helper.fastDemocracy.executeProposal('register QTZ foreign asset', encodedProposal); + } // >>> Acquire Quartz AssetId Info on Moonriver >>> console.log('Acquire Quartz AssetId Info on Moonriver.......'); @@ -1298,40 +1309,43 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { }); await usingShidenPlaygrounds(shidenUrl, async (helper) => { - console.log('1. 
Create foreign asset and metadata'); - // TODO update metadata with values from production - await helper.assets.create( - alice, - QTZ_ASSET_ID_ON_SHIDEN, - alice.address, - QTZ_MINIMAL_BALANCE_ON_SHIDEN, - ); - - await helper.assets.setMetadata( - alice, - QTZ_ASSET_ID_ON_SHIDEN, - 'Cross chain QTZ', - 'xcQTZ', - Number(QTZ_DECIMALS), - ); - - console.log('2. Register asset location on Shiden'); - const assetLocation = { - V2: { - parents: 1, - interior: { - X1: { - Parachain: QUARTZ_CHAIN, + if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { + console.log('1. Create foreign asset and metadata'); + // TODO update metadata with values from production + await helper.assets.create( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + alice.address, + QTZ_MINIMAL_BALANCE_ON_SHIDEN, + ); + + await helper.assets.setMetadata( + alice, + QTZ_ASSET_ID_ON_SHIDEN, + 'Cross chain QTZ', + 'xcQTZ', + Number(QTZ_DECIMALS), + ); + + console.log('2. Register asset location on Shiden'); + const assetLocation = { + V2: { + parents: 1, + interior: { + X1: { + Parachain: QUARTZ_CHAIN, + }, }, }, - }, - }; - - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, QTZ_ASSET_ID_ON_SHIDEN]); + }; - console.log('3. Set QTZ payment for XCM execution on Shiden'); - await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.registerAssetLocation', [assetLocation, QTZ_ASSET_ID_ON_SHIDEN]); + console.log('3. Set QTZ payment for XCM execution on Shiden'); + await helper.getSudo().executeExtrinsic(alice, 'api.tx.xcAssetConfig.setAssetUnitsPerSecond', [assetLocation, unitsPerSecond]); + } else { + console.log('QTZ is already registered on Shiden'); + } console.log('4. Transfer 1 SDN to recipient to create the account (needed due to existential balance)'); await helper.balance.transferToSubstrate(alice, sender.address, shidenInitialBalance); }); From de0fd484c6ecf011266b9db3d7e02160722ff2e9 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Fri, 29 Sep 2023 18:47:44 +0700 Subject: [PATCH 064/143] fix: cargo clippy --- runtime/common/config/xcm/mod.rs | 6 +++--- vendor/baedeker-library | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) create mode 160000 vendor/baedeker-library diff --git a/runtime/common/config/xcm/mod.rs b/runtime/common/config/xcm/mod.rs index d27794ba0d..25eb8f2772 100644 --- a/runtime/common/config/xcm/mod.rs +++ b/runtime/common/config/xcm/mod.rs @@ -185,10 +185,10 @@ impl XcmCallFilter { fn allow_utility_call(call: &RuntimeCall) -> bool { match call { RuntimeCall::Utility(pallet_utility::Call::batch { calls, .. }) => { - calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + calls.iter().all(Self::allow_gov_and_sys_call) } RuntimeCall::Utility(pallet_utility::Call::batch_all { calls, .. }) => { - calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + calls.iter().all(Self::allow_gov_and_sys_call) } RuntimeCall::Utility(pallet_utility::Call::as_derivative { call, .. }) => { Self::allow_gov_and_sys_call(call) @@ -197,7 +197,7 @@ impl XcmCallFilter { Self::allow_gov_and_sys_call(call) } RuntimeCall::Utility(pallet_utility::Call::force_batch { calls, .. 
}) => { - calls.iter().all(|call| Self::allow_gov_and_sys_call(call)) + calls.iter().all(Self::allow_gov_and_sys_call) } _ => false, } diff --git a/vendor/baedeker-library b/vendor/baedeker-library new file mode 160000 index 0000000000..9f1eca0cea --- /dev/null +++ b/vendor/baedeker-library @@ -0,0 +1 @@ +Subproject commit 9f1eca0cea9f50ce8486f2a4b9db65892ea12c36 From 44071b633884d321056113fbffbd312a7840dbdc Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Sat, 30 Sep 2023 17:52:25 +0200 Subject: [PATCH 065/143] refactor: use type-safe propertywriter to set/delete properties --- pallets/balances-adapter/src/common.rs | 23 ++ pallets/common/src/benchmarking.rs | 23 +- pallets/common/src/lib.rs | 513 +++++++++++++++++++----- pallets/fungible/src/common.rs | 25 +- pallets/nonfungible/src/benchmarking.rs | 31 +- pallets/nonfungible/src/common.rs | 71 ++-- pallets/nonfungible/src/erc.rs | 1 - pallets/nonfungible/src/lib.rs | 77 +--- pallets/refungible/src/benchmarking.rs | 31 +- pallets/refungible/src/common.rs | 81 ++-- pallets/refungible/src/erc.rs | 1 - pallets/refungible/src/lib.rs | 89 +--- 12 files changed, 661 insertions(+), 305 deletions(-) diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index 649b0855dd..0dd4fd98bb 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -172,6 +172,20 @@ impl CommonCollectionOperations for NativeFungibleHandle { fail!(>::UnsupportedOperation); } + fn get_token_properties_map(&self, _token_id: TokenId) -> up_data_structs::TokenProperties { + // No token properties are defined on fungibles + up_data_structs::TokenProperties::new() + } + + fn set_token_properties_map(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { + // No token properties are defined on fungibles + } + + fn properties_exist(&self, _token: TokenId) -> bool { + // No token properties are defined on fungibles + false + } + fn set_token_property_permissions( &self, _sender: &::CrossAccountId, @@ -277,6 +291,15 @@ impl CommonCollectionOperations for NativeFungibleHandle { Err(up_data_structs::TokenOwnerError::MultipleOwners) } + fn check_token_indirect_owner( + &self, + _token: TokenId, + _maybe_owner: &::CrossAccountId, + _nesting_budget: &dyn up_data_structs::budget::Budget, + ) -> Result { + Ok(false) + } + fn token_owners(&self, _token: TokenId) -> Vec<::CrossAccountId> { vec![] } diff --git a/pallets/common/src/benchmarking.rs b/pallets/common/src/benchmarking.rs index ba2006dcf0..c7837be160 100644 --- a/pallets/common/src/benchmarking.rs +++ b/pallets/common/src/benchmarking.rs @@ -22,8 +22,9 @@ use pallet_evm::account::CrossAccountId; use frame_benchmarking::{benchmarks, account}; use up_data_structs::{ CollectionMode, CreateCollectionData, CollectionId, Property, PropertyKey, PropertyValue, - CollectionPermissions, NestingPermissions, AccessMode, MAX_COLLECTION_NAME_LENGTH, - MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_TOKEN_PREFIX_LENGTH, MAX_PROPERTIES_PER_ITEM, + CollectionPermissions, NestingPermissions, AccessMode, PropertiesPermissionMap, + MAX_COLLECTION_NAME_LENGTH, MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_TOKEN_PREFIX_LENGTH, + MAX_PROPERTIES_PER_ITEM, }; use frame_support::{ traits::{Get, fungible::Balanced, Imbalance, tokens::Precision}, @@ -123,6 +124,16 @@ fn create_collection( ) } +pub fn load_is_admin_and_property_permissions( + collection: &CollectionHandle, + sender: &T::CrossAccountId, +) -> (bool, PropertiesPermissionMap) { + ( + 
collection.is_owner_or_admin(sender), + >::property_permissions(collection.id), + ) +} + /// Helper macros, which handles all benchmarking preparation in semi-declarative way /// /// `name` is a substrate account @@ -215,4 +226,12 @@ benchmarks! { assert_eq!(collection_handle.permissions.access(), AccessMode::AllowList); }: {collection_handle.check_allowlist(&sender)?;} + + init_token_properties_common { + bench_init!{ + owner: sub; collection: collection(owner); + sender: sub; + sender: cross_from_sub(sender); + }; + }: {load_is_admin_and_property_permissions(&collection, &sender);} } diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index d182b44fbb..53ec3d8452 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -56,6 +56,7 @@ extern crate alloc; use core::{ ops::{Deref, DerefMut}, slice::from_ref, + marker::PhantomData, }; use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; use sp_std::vec::Vec; @@ -97,6 +98,9 @@ pub mod eth; pub mod helpers; #[allow(missing_docs)] pub mod weights; + +use weights::WeightInfo; + /// Weight info. pub type SelfWeightOf = ::WeightInfo; @@ -865,18 +869,6 @@ pub mod pallet { >; } -/// Represents the change mode for the token property. -pub enum SetPropertyMode { - /// The token already exists. - ExistingToken, - - /// New token. - NewToken { - /// The creator of the token is the recipient. - mint_target_is_sender: bool, - }, -} - /// Value representation with delayed initialization time. pub struct LazyValue T> { value: Option, @@ -892,19 +884,33 @@ impl T> LazyValue { } } - /// Get the value. If it call furst time the value will be initialized. + /// Get the value. If it is called the first time, the value will be initialized. pub fn value(&mut self) -> &T { - if self.value.is_none() { - self.value = Some(self.f.take().unwrap()()) - } - + self.compute_value_if_not_already(); self.value.as_ref().unwrap() } - /// Is value initialized. + /// Get the value. If it is called the first time, the value will be initialized. + pub fn value_mut(&mut self) -> &mut T { + self.compute_value_if_not_already(); + self.value.as_mut().unwrap() + } + + fn into_inner(mut self) -> T { + self.compute_value_if_not_already(); + self.value.unwrap() + } + + /// Is value initialized? pub fn has_value(&self) -> bool { self.value.is_some() } + + fn compute_value_if_not_already(&mut self) { + if self.value.is_none() { + self.value = Some(self.f.take().unwrap()()) + } + } } fn check_token_permissions( @@ -926,10 +932,19 @@ where fail!(>::NoPermission); } - let token_certainly_exist = is_token_owner.has_value() && (*is_token_owner.value())?; - if !token_certainly_exist && !is_token_exist.value() { - fail!(>::TokenNotFound); + let token_exist_due_to_owner_check_success = + is_token_owner.has_value() && (*is_token_owner.value())?; + + // If the token owner check has occurred and succeeded, + // we know the token exists (otherwise, the owner check must fail). + if !token_exist_due_to_owner_check_success { + // If the token owner check didn't occur, + // we must check the token's existence ourselves. + if !is_token_exist.value() { + fail!(>::TokenNotFound); + } } + Ok(()) } @@ -1312,92 +1327,6 @@ impl Pallet { Ok(()) } - /// A batch operation to add, edit or remove properties for a token. - /// It sets or removes a token's properties according to - /// `properties_updates` contents: - /// * sets a property under the with the value provided `(, Some())` - /// * removes a property under the if the value is `None` `(, None)`. 
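// (A minimal usage sketch of the `LazyValue` helper introduced above; `expensive_check`
// is a stand-in for calls such as `collection.is_owner_or_admin(sender)`:
//
//     let mut is_admin = LazyValue::new(|| expensive_check());
//     assert!(!is_admin.has_value());   // nothing computed yet
//     let first = *is_admin.value();    // the closure runs here and the result is cached
//     let again = *is_admin.value();    // cached value, the closure is not re-run
//     assert_eq!(first, again);
//
// `value_mut` and `into_inner` behave the same way, computing the value on first use.)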
- /// - /// All affected properties should have `mutable` permission - /// to be **deleted** or to be **set more than once**, - /// and the sender should have permission to edit those properties. - /// - /// This function fires an event for each property change. - /// In case of an error, all the changes (including the events) will be reverted - /// since the function is transactional. - #[allow(clippy::too_many_arguments)] - pub fn modify_token_properties( - collection: &CollectionHandle, - sender: &T::CrossAccountId, - token_id: TokenId, - is_token_exist: &mut LazyValue, - properties_updates: impl Iterator)>, - mut stored_properties: TokenProperties, - is_token_owner: &mut LazyValue, FTO>, - set_token_properties: impl FnOnce(TokenProperties), - log: evm_coder::ethereum::Log, - ) -> DispatchResult - where - FTO: FnOnce() -> Result, - FTE: FnOnce() -> bool, - { - let mut is_collection_admin = LazyValue::new(|| collection.is_owner_or_admin(sender)); - let mut permissions = LazyValue::new(|| Self::property_permissions(collection.id)); - - let mut changed = false; - for (key, value) in properties_updates { - let permission = permissions - .value() - .get(&key) - .cloned() - .unwrap_or_else(PropertyPermission::none); - - let property_exists = stored_properties.get(&key).is_some(); - - match permission { - PropertyPermission { mutable: false, .. } if property_exists => { - return Err(>::NoPermission.into()); - } - - PropertyPermission { - collection_admin, - token_owner, - .. - } => check_token_permissions::( - collection_admin, - token_owner, - &mut is_collection_admin, - is_token_owner, - is_token_exist, - )?, - } - - match value { - Some(value) => { - stored_properties - .try_set(key.clone(), value) - .map_err(>::from)?; - - Self::deposit_event(Event::TokenPropertySet(collection.id, token_id, key)); - } - None => { - stored_properties.remove(&key).map_err(>::from)?; - - Self::deposit_event(Event::TokenPropertyDeleted(collection.id, token_id, key)); - } - } - - changed = true; - } - - if changed { - >::deposit_log(log); - set_token_properties(stored_properties); - } - - Ok(()) - } - /// Sets or unsets the approval of a given operator. /// /// The `operator` is allowed to transfer all token pieces of the `owner` on their behalf. @@ -2166,6 +2095,22 @@ pub trait CommonCollectionOperations { budget: &dyn Budget, ) -> DispatchResultWithPostInfo; + /// Get token properties raw map. + /// + /// * `token_id` - The token which properties are needed. + fn get_token_properties_map(&self, token_id: TokenId) -> TokenProperties; + + /// Set token properties raw map. + /// + /// * `token_id` - The token for which the properties are being set. + /// * `map` - The raw map containing the token's properties. + fn set_token_properties_map(&self, token_id: TokenId, map: TokenProperties); + + /// Whether the given token has properties. + /// + /// * `token_id` - The token in question. + fn properties_exist(&self, token: TokenId) -> bool; + /// Set token property permissions. /// /// * `sender` - Must be either the owner of the token or its admin. @@ -2309,6 +2254,18 @@ pub trait CommonCollectionOperations { /// * `token` - The token for which you need to find out the owner. fn token_owner(&self, token: TokenId) -> Result; + /// Checks if the `maybe_owner` is the indirect owner of the `token`. + /// + /// * `token` - Id token to check. + /// * `maybe_owner` - The account to check. + /// * `nesting_budget` - A budget that can be spent on nesting tokens. 
+ fn check_token_indirect_owner( + &self, + token: TokenId, + maybe_owner: &T::CrossAccountId, + nesting_budget: &dyn Budget, + ) -> Result; + /// Returns 10 tokens owners in no particular order. /// /// * `token` - The token for which you need to find out the owners. @@ -2420,6 +2377,348 @@ impl From for Error { } } +/// A marker structure that enables the writer implementation +/// to provide the interface to write properties to **newly created** tokens. +pub struct NewTokenPropertyWriter; + +/// A marker structure that enables the writer implementation +/// to provide the interface to write properties to **already existing** tokens. +pub struct ExistingTokenPropertyWriter; + +/// The type-safe interface for writing properties (setting or deleting) to tokens. +/// It has two distinct implementations for newly created tokens and existing ones. +/// +/// This type utilizes the lazy evaluation to avoid repeating the computation +/// of several performance-heavy or PoV-heavy tasks, +/// such as checking the indirect ownership or reading the token property permissions. +pub struct PropertyWriter< + 'a, + T, + Handle, + WriterVariant, + FIsAdmin, + FPropertyPermissions, + FCheckTokenExist, + FGetProperties, +> where + T: Config, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, +{ + collection: &'a Handle, + is_collection_admin: LazyValue, + property_permissions: LazyValue, + check_token_exist: FCheckTokenExist, + get_properties: FGetProperties, + _phantom: PhantomData<(T, WriterVariant)>, +} + +impl<'a, T, Handle, FIsAdmin, FPropertyPermissions, FCheckTokenExist, FGetProperties> + PropertyWriter< + 'a, + T, + Handle, + NewTokenPropertyWriter, + FIsAdmin, + FPropertyPermissions, + FCheckTokenExist, + FGetProperties, + > where + T: Config, + Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, + FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, + FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, +{ + /// A function to write properties to a **newly created** token. + pub fn write_token_properties( + &mut self, + mint_target_is_sender: bool, + token_id: TokenId, + properties_updates: impl Iterator, + log: evm_coder::ethereum::Log, + ) -> DispatchResult { + self.internal_write_token_properties( + token_id, + properties_updates.map(|p| (p.key, Some(p.value))), + |_| Ok(mint_target_is_sender), + log, + ) + } +} + +impl<'a, T, Handle, FIsAdmin, FPropertyPermissions, FCheckTokenExist, FGetProperties> + PropertyWriter< + 'a, + T, + Handle, + ExistingTokenPropertyWriter, + FIsAdmin, + FPropertyPermissions, + FCheckTokenExist, + FGetProperties, + > where + T: Config, + Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, + FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, + FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, +{ + /// A function to write properties to an **already existing** token. 
+ pub fn write_token_properties( + &mut self, + sender: &T::CrossAccountId, + token_id: TokenId, + properties_updates: impl Iterator)>, + nesting_budget: &dyn Budget, + log: evm_coder::ethereum::Log, + ) -> DispatchResult { + self.internal_write_token_properties( + token_id, + properties_updates, + |collection| collection.check_token_indirect_owner(token_id, sender, nesting_budget), + log, + ) + } +} + +impl< + 'a, + T, + Handle, + WriterVariant, + FIsAdmin, + FPropertyPermissions, + FCheckTokenExist, + FGetProperties, + > + PropertyWriter< + 'a, + T, + Handle, + WriterVariant, + FIsAdmin, + FPropertyPermissions, + FCheckTokenExist, + FGetProperties, + > where + T: Config, + Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, + FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, + FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, +{ + fn internal_write_token_properties( + &mut self, + token_id: TokenId, + properties_updates: impl Iterator)>, + check_token_owner: FCheckTokenOwner, + log: evm_coder::ethereum::Log, + ) -> DispatchResult + where + FCheckTokenOwner: FnOnce(&Handle) -> Result, + { + let get_properties = self.get_properties; + let mut stored_properties = LazyValue::new(move || get_properties(token_id)); + + let mut is_token_owner = LazyValue::new(|| check_token_owner(self.collection)); + + let check_token_exist = self.check_token_exist; + let mut is_token_exist = LazyValue::new(move || check_token_exist(token_id)); + + for (key, value) in properties_updates { + let permission = self + .property_permissions + .value() + .get(&key) + .cloned() + .unwrap_or_else(PropertyPermission::none); + + match permission { + PropertyPermission { mutable: false, .. } + if stored_properties.value().get(&key).is_some() => + { + return Err(>::NoPermission.into()); + } + + PropertyPermission { + collection_admin, + token_owner, + .. + } => check_token_permissions::( + collection_admin, + token_owner, + &mut self.is_collection_admin, + &mut is_token_owner, + &mut is_token_exist, + )?, + } + + match value { + Some(value) => { + stored_properties + .value_mut() + .try_set(key.clone(), value) + .map_err(>::from)?; + + >::deposit_event(Event::TokenPropertySet( + self.collection.id, + token_id, + key, + )); + } + None => { + stored_properties + .value_mut() + .remove(&key) + .map_err(>::from)?; + + >::deposit_event(Event::TokenPropertyDeleted( + self.collection.id, + token_id, + key, + )); + } + } + } + + let properties_changed = stored_properties.has_value(); + if properties_changed { + >::deposit_log(log); + + self.collection + .set_token_properties_map(token_id, stored_properties.into_inner()); + } + + Ok(()) + } +} + +/// Create a [`PropertyWriter`] for newly created tokens. 
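// A sketch of the intended call pattern for the "new token" writer (illustrative;
// `minted` and `log_for` are stand-ins, while the NFT/RFT pallets below build the
// ERC-721 `TokenChanged` log per token). The writer is created once per mint batch,
// so the admin check and the property permissions are loaded at most once, and
// `write_token_properties` is then invoked for every created token:
//
//     let mut writer = property_writer_for_new_token(collection, sender);
//     for (token, data) in minted {
//         writer.write_token_properties(
//             sender.conv_eq(&data.owner), // was the token minted to the sender?
//             token,
//             data.properties.clone().into_iter(),
//             log_for(token),
//         )?;
//     }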
+pub fn property_writer_for_new_token<'a, T, Handle>( + collection: &'a Handle, + sender: &'a T::CrossAccountId, +) -> PropertyWriter< + 'a, + T, + Handle, + NewTokenPropertyWriter, + impl FnOnce() -> bool + 'a, + impl FnOnce() -> PropertiesPermissionMap + 'a, + impl Copy + FnOnce(TokenId) -> bool + 'a, + impl Copy + FnOnce(TokenId) -> TokenProperties + 'a, +> +where + T: Config, + Handle: CommonCollectionOperations + Deref>, +{ + PropertyWriter { + collection, + is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), + property_permissions: LazyValue::new(|| >::property_permissions(collection.id)), + check_token_exist: |token_id| { + debug_assert!(collection.token_exists(token_id)); + true + }, + get_properties: |token_id| { + debug_assert!(!collection.properties_exist(token_id)); + TokenProperties::new() + }, + _phantom: PhantomData, + } +} + +#[cfg(feature = "runtime-benchmarks")] +/// Create a `PropertyWriter` with preloaded `is_collection_admin` and `property_permissions. +/// Also: +/// * it will return `true` for the token ownership check. +/// * it will return empty stored properties without reading them from the storage. +pub fn collection_info_loaded_property_writer( + collection: &Handle, + is_collection_admin: bool, + property_permissions: PropertiesPermissionMap, +) -> PropertyWriter< + T, + Handle, + NewTokenPropertyWriter, + impl FnOnce() -> bool, + impl FnOnce() -> PropertiesPermissionMap, + impl Copy + FnOnce(TokenId) -> bool, + impl Copy + FnOnce(TokenId) -> TokenProperties, +> +where + T: Config, + Handle: CommonCollectionOperations + Deref>, +{ + PropertyWriter { + collection, + is_collection_admin: LazyValue::new(move || is_collection_admin), + property_permissions: LazyValue::new(move || property_permissions), + check_token_exist: |_token_id| true, + get_properties: |_token_id| TokenProperties::new(), + _phantom: PhantomData, + } +} + +/// Create a [`PropertyWriter`] for already existing tokens. +pub fn property_writer_for_existing_token<'a, T, Handle>( + collection: &'a Handle, + sender: &'a T::CrossAccountId, +) -> PropertyWriter< + 'a, + T, + Handle, + ExistingTokenPropertyWriter, + impl FnOnce() -> bool + 'a, + impl FnOnce() -> PropertiesPermissionMap + 'a, + impl Copy + FnOnce(TokenId) -> bool + 'a, + impl Copy + FnOnce(TokenId) -> TokenProperties + 'a, +> +where + T: Config, + Handle: CommonCollectionOperations + Deref>, +{ + PropertyWriter { + collection, + is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), + property_permissions: LazyValue::new(|| >::property_permissions(collection.id)), + check_token_exist: |token_id| collection.token_exists(token_id), + get_properties: |token_id| collection.get_token_properties_map(token_id), + _phantom: PhantomData, + } +} + +/// Computes the weight delta for newly created tokens with properties. +/// * `properties_nums` - The properties num of each created token. +/// * `init_token_properties` - The function to obtain the weight from a token's properties num. +pub fn init_token_properties_delta Weight>( + properties_nums: impl Iterator, + init_token_properties: I, +) -> Weight { + let mut delta = properties_nums + .filter_map(|properties_num| { + if properties_num > 0 { + Some(init_token_properties(properties_num)) + } else { + None + } + }) + .fold(Weight::zero(), |a, b| a.saturating_add(b)); + + // If at least once the `init_token_properties` was called, + // it means at least one newly created token has properties. 
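// (For instance, per-token property counts of [0, 3, 0, 5] contribute
// init_token_properties(3) + init_token_properties(5) to the delta; tokens created
// without properties add nothing.)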
+ // Becuase of that, some common collection data also was loaded and we need to add this weight. + // However, these common data was loaded only once which is guaranteed by the `PropertyWriter`. + if !delta.is_zero() { + delta = delta.saturating_add(>::init_token_properties_common()) + } + + delta +} + #[cfg(any(feature = "tests", test))] #[allow(missing_docs)] pub mod tests { diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index d2894b3460..d6cf683914 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -25,7 +25,7 @@ use pallet_common::{ weights::WeightInfo as _, SelfWeightOf as PalletCommonWeightOf, }; use pallet_structure::Error as StructureError; -use sp_runtime::ArithmeticError; +use sp_runtime::{ArithmeticError, DispatchError}; use sp_std::{vec::Vec, vec}; use up_data_structs::{Property, PropertyKey, PropertyValue, PropertyKeyPermission}; @@ -364,6 +364,20 @@ impl CommonCollectionOperations for FungibleHandle { fail!(>::SettingPropertiesNotAllowed) } + fn get_token_properties_map(&self, _token_id: TokenId) -> up_data_structs::TokenProperties { + // No token properties are defined on fungibles + up_data_structs::TokenProperties::new() + } + + fn set_token_properties_map(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { + // No token properties are defined on fungibles + } + + fn properties_exist(&self, _token: TokenId) -> bool { + // No token properties are defined on fungibles + false + } + fn check_nesting( &self, _sender: ::CrossAccountId, @@ -402,6 +416,15 @@ impl CommonCollectionOperations for FungibleHandle { Err(TokenOwnerError::MultipleOwners) } + fn check_token_indirect_owner( + &self, + _token: TokenId, + _maybe_owner: &T::CrossAccountId, + _nesting_budget: &dyn Budget, + ) -> Result { + Ok(false) + } + /// Returns 10 tokens owners in no particular order. fn token_owners(&self, token: TokenId) -> Vec { >::token_owners(self.id, token).unwrap_or_default() diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 233c2399b7..65367feead 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -20,7 +20,9 @@ use crate::{Pallet, Config, NonfungibleHandle}; use frame_benchmarking::{benchmarks, account}; use pallet_common::{ bench_init, - benchmarking::{create_collection_raw, property_key, property_value}, + benchmarking::{ + create_collection_raw, property_key, property_value, load_is_admin_and_property_permissions, + }, CommonCollectionOperations, }; use sp_std::prelude::*; @@ -198,14 +200,15 @@ benchmarks! { value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?} + }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - reset_token_properties { + init_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; bench_init!{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; + let perms = (0..b).map(|k| PropertyKeyPermission { key: property_key(k as usize), permission: PropertyPermission { @@ -220,7 +223,25 @@ benchmarks! 
{ value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::NewToken { mint_target_is_sender: true }, &Unlimited)?} + + let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); + }: { + let mut property_writer = pallet_common::collection_info_loaded_property_writer( + &collection, + is_collection_admin, + property_permissions, + ); + + property_writer.write_token_properties( + true, + item, + props.into_iter(), + crate::erc::ERC721TokenEvent::TokenChanged { + token_id: item.into(), + } + .to_log(T::ContractAddress::get()), + )? + } delete_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; @@ -242,7 +263,7 @@ benchmarks! { value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - >::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?; + >::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?; let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); }: {>::delete_token_properties(&collection, &owner, item, to_delete.into_iter(), &Unlimited)?} diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index bf2f2118d7..c50f79ce02 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -23,49 +23,40 @@ use up_data_structs::{ }; use pallet_common::{ CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, with_weight, - weights::WeightInfo as _, SelfWeightOf as PalletCommonWeightOf, + weights::WeightInfo as _, SelfWeightOf as PalletCommonWeightOf, init_token_properties_delta, }; +use pallet_structure::Pallet as PalletStructure; use sp_runtime::DispatchError; use sp_std::{vec::Vec, vec}; use crate::{ AccountBalance, Allowance, Config, CreateItemData, Error, NonfungibleHandle, Owned, Pallet, - SelfWeightOf, TokenData, weights::WeightInfo, TokensMinted, + SelfWeightOf, TokenData, weights::WeightInfo, TokensMinted, TokenProperties, }; pub struct CommonWeights(PhantomData); impl CommonWeightInfo for CommonWeights { fn create_multiple_items_ex(data: &CreateItemExData) -> Weight { match data { - CreateItemExData::NFT(t) => { - >::create_multiple_items_ex(t.len() as u32) - + t.iter() - .filter_map(|t| { - if t.properties.len() > 0 { - Some(>::reset_token_properties( - t.properties.len() as u32, - )) - } else { - None - } - }) - .fold(Weight::zero(), |a, b| a.saturating_add(b)) - } + CreateItemExData::NFT(t) => >::create_multiple_items_ex(t.len() as u32) + .saturating_add(init_token_properties_delta::( + t.iter().map(|t| t.properties.len() as u32), + >::init_token_properties, + )), _ => Weight::zero(), } } fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { - >::create_multiple_items(data.len() as u32) - + data - .iter() - .filter_map(|t| match t { - up_data_structs::CreateItemData::NFT(n) if n.properties.len() > 0 => Some( - >::reset_token_properties(n.properties.len() as u32), - ), - _ => None, - }) - .fold(Weight::zero(), |a, b| a.saturating_add(b)) + >::create_multiple_items(data.len() as u32).saturating_add( + init_token_properties_delta::( + data.iter().map(|t| match t { + up_data_structs::CreateItemData::NFT(n) => n.properties.len() as u32, + _ => 0, + }), + >::init_token_properties, + ), + ) } fn burn_item() -> Weight { @@ -247,7 +238,6 @@ impl 
CommonCollectionOperations for NonfungibleHandle { &sender, token_id, properties.into_iter(), - pallet_common::SetPropertyMode::ExistingToken, nesting_budget, ), weight, @@ -275,6 +265,14 @@ impl CommonCollectionOperations for NonfungibleHandle { ) } + fn get_token_properties_map(&self, token_id: TokenId) -> up_data_structs::TokenProperties { + >::get((self.id, token_id)) + } + + fn set_token_properties_map(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { + >::set((self.id, token_id), map) + } + fn set_token_property_permissions( &self, sender: &T::CrossAccountId, @@ -289,6 +287,10 @@ impl CommonCollectionOperations for NonfungibleHandle { ) } + fn properties_exist(&self, token: TokenId) -> bool { + >::contains_key((self.id, token)) + } + fn burn_item( &self, sender: T::CrossAccountId, @@ -459,6 +461,21 @@ impl CommonCollectionOperations for NonfungibleHandle { .ok_or(TokenOwnerError::NotFound) } + fn check_token_indirect_owner( + &self, + token: TokenId, + maybe_owner: &T::CrossAccountId, + nesting_budget: &dyn Budget, + ) -> Result { + >::check_indirectly_owned( + maybe_owner.clone(), + self.id, + token, + None, + nesting_budget, + ) + } + /// Returns token owners. fn token_owners(&self, token: TokenId) -> Vec { self.token_owner(token).map_or_else(|_| vec![], |t| vec![t]) diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index 8d37ec100c..dc46fe926f 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -203,7 +203,6 @@ impl NonfungibleHandle { &caller, TokenId(token_id), properties.into_iter(), - pallet_common::SetPropertyMode::ExistingToken, &nesting_budget, ) .map_err(dispatch_to_evm::) diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index 13baa46470..019ee2fa71 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -109,7 +109,7 @@ use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_common::{ Error as CommonError, Pallet as PalletCommon, Event as CommonEvent, CollectionHandle, eth::collection_id_to_address, SelfWeightOf as PalletCommonWeightOf, - weights::WeightInfo as CommonWeightInfo, helpers::add_weight_to_post_info, SetPropertyMode, + weights::WeightInfo as CommonWeightInfo, helpers::add_weight_to_post_info, }; use pallet_structure::{Pallet as PalletStructure, Error as StructureError}; use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; @@ -598,58 +598,16 @@ impl Pallet { sender: &T::CrossAccountId, token_id: TokenId, properties_updates: impl Iterator)>, - mode: SetPropertyMode, nesting_budget: &dyn Budget, ) -> DispatchResult { - let mut is_token_owner = pallet_common::LazyValue::new(|| { - if let SetPropertyMode::NewToken { - mint_target_is_sender, - } = mode - { - return Ok(mint_target_is_sender); - } - - let is_owned = >::check_indirectly_owned( - sender.clone(), - collection.id, - token_id, - None, - nesting_budget, - )?; - - Ok(is_owned) - }); - - let is_new_token = matches!(mode, SetPropertyMode::NewToken { .. 
}); + let mut property_writer = + pallet_common::property_writer_for_existing_token(collection, sender); - let mut is_token_exist = pallet_common::LazyValue::new(|| { - if is_new_token { - debug_assert!(Self::token_exists(collection, token_id)); - true - } else { - Self::token_exists(collection, token_id) - } - }); - - let stored_properties = if is_new_token { - debug_assert!(!>::contains_key(( - collection.id, - token_id - ))); - TokenPropertiesT::new() - } else { - >::get((collection.id, token_id)) - }; - - >::modify_token_properties( - collection, + property_writer.write_token_properties( sender, token_id, - &mut is_token_exist, properties_updates, - stored_properties, - &mut is_token_owner, - |properties| >::set((collection.id, token_id), properties), + nesting_budget, erc::ERC721TokenEvent::TokenChanged { token_id: token_id.into(), } @@ -680,7 +638,6 @@ impl Pallet { sender: &T::CrossAccountId, token_id: TokenId, properties: impl Iterator, - mode: SetPropertyMode, nesting_budget: &dyn Budget, ) -> DispatchResult { Self::modify_token_properties( @@ -688,7 +645,6 @@ impl Pallet { sender, token_id, properties.map(|p| (p.key, Some(p.value))), - mode, nesting_budget, ) } @@ -710,7 +666,6 @@ impl Pallet { sender, token_id, [property].into_iter(), - SetPropertyMode::ExistingToken, nesting_budget, ) } @@ -732,7 +687,6 @@ impl Pallet { sender, token_id, property_keys.into_iter().map(|key| (key, None)), - SetPropertyMode::ExistingToken, nesting_budget, ) } @@ -994,6 +948,8 @@ impl Pallet { // ========= + let mut property_writer = pallet_common::property_writer_for_new_token(collection, sender); + with_transaction(|| { for (i, data) in data.iter().enumerate() { let token = first_token + i as u32 + 1; @@ -1006,21 +962,22 @@ impl Pallet { }, ); + let token = TokenId(token); + >::nest_if_sent_to_token_unchecked( &data.owner, collection.id, - TokenId(token), + token, ); - if let Err(e) = Self::set_token_properties( - collection, - sender, - TokenId(token), + if let Err(e) = property_writer.write_token_properties( + sender.conv_eq(&data.owner), + token, data.properties.clone().into_iter(), - SetPropertyMode::NewToken { - mint_target_is_sender: sender.conv_eq(&data.owner), - }, - nesting_budget, + erc::ERC721TokenEvent::TokenChanged { + token_id: token.into(), + } + .to_log(T::ContractAddress::get()), ) { return TransactionOutcome::Rollback(Err(e)); } diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 8fe7629374..630f4782cf 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -22,7 +22,9 @@ use core::iter::IntoIterator; use frame_benchmarking::{benchmarks, account}; use pallet_common::{ bench_init, - benchmarking::{create_collection_raw, property_key, property_value}, + benchmarking::{ + create_collection_raw, property_key, property_value, load_is_admin_and_property_permissions, + }, }; use sp_std::prelude::*; use up_data_structs::{ @@ -255,14 +257,15 @@ benchmarks! 
{ value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?} + }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - reset_token_properties { + init_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; bench_init!{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; + let perms = (0..b).map(|k| PropertyKeyPermission { key: property_key(k as usize), permission: PropertyPermission { @@ -277,7 +280,25 @@ benchmarks! { value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::NewToken { mint_target_is_sender: true }, &Unlimited)?} + + let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); + }: { + let mut property_writer = pallet_common::collection_info_loaded_property_writer( + &collection, + is_collection_admin, + property_permissions, + ); + + property_writer.write_token_properties( + true, + item, + props.into_iter(), + crate::erc::ERC721TokenEvent::TokenChanged { + token_id: item.into(), + } + .to_log(T::ContractAddress::get()), + )? + } delete_token_properties { let b in 0..MAX_PROPERTIES_PER_ITEM; @@ -299,7 +320,7 @@ benchmarks! { value: property_value(), }).collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - >::set_token_properties(&collection, &owner, item, props.into_iter(), SetPropertyMode::ExistingToken, &Unlimited)?; + >::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?; let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); }: {>::delete_token_properties(&collection, &owner, item, to_delete.into_iter(), &Unlimited)?} diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index b94a8789e3..41f9dbf4d4 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -20,20 +20,20 @@ use sp_std::collections::btree_map::BTreeMap; use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight, traits::Get}; use up_data_structs::{ CollectionId, TokenId, CreateItemExData, budget::Budget, Property, PropertyKey, PropertyValue, - PropertyKeyPermission, CollectionPropertiesVec, CreateRefungibleExMultipleOwners, - CreateRefungibleExSingleOwner, TokenOwnerError, + PropertyKeyPermission, CreateRefungibleExMultipleOwners, CreateRefungibleExSingleOwner, + TokenOwnerError, }; use pallet_common::{ CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, with_weight, - weights::WeightInfo as _, + weights::WeightInfo as _, init_token_properties_delta, }; -use pallet_structure::Error as StructureError; +use pallet_structure::{Pallet as PalletStructure, Error as StructureError}; use sp_runtime::{DispatchError}; use sp_std::{vec::Vec, vec}; use crate::{ AccountBalance, Allowance, Balance, Config, Error, Owned, Pallet, RefungibleHandle, - SelfWeightOf, weights::WeightInfo, TokensMinted, TotalSupply, CreateItemData, + SelfWeightOf, weights::WeightInfo, TokensMinted, TotalSupply, CreateItemData, TokenProperties, }; macro_rules! max_weight_of { @@ -45,26 +45,19 @@ macro_rules! 
max_weight_of { }; } -fn properties_weight(properties: &CollectionPropertiesVec) -> Weight { - if properties.len() > 0 { - >::reset_token_properties(properties.len() as u32) - } else { - Weight::zero() - } -} - pub struct CommonWeights(PhantomData); impl CommonWeightInfo for CommonWeights { fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { >::create_multiple_items(data.len() as u32).saturating_add( - data.iter() - .map(|data| match data { + init_token_properties_delta::( + data.iter().map(|data| match data { up_data_structs::CreateItemData::ReFungible(rft_data) => { - properties_weight::(&rft_data.properties) + rft_data.properties.len() as u32 } - _ => Weight::zero(), - }) - .fold(Weight::zero(), |a, b| a.saturating_add(b)), + _ => 0, + }), + >::init_token_properties, + ), ) } @@ -72,15 +65,17 @@ impl CommonWeightInfo for CommonWeights { match call { CreateItemExData::RefungibleMultipleOwners(i) => { >::create_multiple_items_ex_multiple_owners(i.users.len() as u32) - .saturating_add(properties_weight::(&i.properties)) + .saturating_add(init_token_properties_delta::( + [i.properties.len() as u32].into_iter(), + >::init_token_properties, + )) } CreateItemExData::RefungibleMultipleItems(i) => { >::create_multiple_items_ex_multiple_items(i.len() as u32) - .saturating_add( - i.iter() - .map(|d| properties_weight::(&d.properties)) - .fold(Weight::zero(), |a, b| a.saturating_add(b)), - ) + .saturating_add(init_token_properties_delta::( + i.iter().map(|d| d.properties.len() as u32), + >::init_token_properties, + )) } _ => Weight::zero(), } @@ -399,7 +394,6 @@ impl CommonCollectionOperations for RefungibleHandle { &sender, token_id, properties.into_iter(), - pallet_common::SetPropertyMode::ExistingToken, nesting_budget, ), weight, @@ -441,6 +435,18 @@ impl CommonCollectionOperations for RefungibleHandle { ) } + fn get_token_properties_map(&self, token_id: TokenId) -> up_data_structs::TokenProperties { + >::get((self.id, token_id)) + } + + fn set_token_properties_map(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { + >::set((self.id, token_id), map) + } + + fn properties_exist(&self, token: TokenId) -> bool { + >::contains_key((self.id, token)) + } + fn check_nesting( &self, _sender: ::CrossAccountId, @@ -479,6 +485,29 @@ impl CommonCollectionOperations for RefungibleHandle { >::token_owner(self.id, token) } + fn check_token_indirect_owner( + &self, + token: TokenId, + maybe_owner: &T::CrossAccountId, + nesting_budget: &dyn Budget, + ) -> Result { + let balance = self.balance(maybe_owner.clone(), token); + let total_pieces: u128 = >::total_pieces(self.id, token).unwrap_or(u128::MAX); + if balance != total_pieces { + return Ok(false); + } + + let is_bundle_owner = >::check_indirectly_owned( + maybe_owner.clone(), + self.id, + token, + None, + nesting_budget, + )?; + + Ok(is_bundle_owner) + } + /// Returns 10 token in no particular order. 
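// Note on the refungible `check_token_indirect_owner` above: ownership of *all*
// pieces is required before the (potentially expensive) nesting walk is attempted.
// For example, an account holding 150 of a token's 200 pieces is not treated as the
// owner for property-writing purposes; the check returns `Ok(false)` without
// consulting the structure pallet.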
fn token_owners(&self, token: TokenId) -> Vec { >::token_owners(self.id, token).unwrap_or_default() diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 51b1425482..3a706cf8ef 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -214,7 +214,6 @@ impl RefungibleHandle { &caller, TokenId(token_id), properties.into_iter(), - pallet_common::SetPropertyMode::ExistingToken, &nesting_budget, ) .map_err(dispatch_to_evm::) diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 84a9c039ad..5482eb65b7 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -96,8 +96,8 @@ use frame_support::{ensure, storage::with_transaction, transactional}; use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_evm_coder_substrate::WithRecorder; use pallet_common::{ - CommonCollectionOperations, Error as CommonError, eth::collection_id_to_address, - Event as CommonEvent, Pallet as PalletCommon, SetPropertyMode, + Error as CommonError, eth::collection_id_to_address, Event as CommonEvent, + Pallet as PalletCommon, }; use pallet_structure::Pallet as PalletStructure; use sp_core::{Get, H160}; @@ -533,66 +533,16 @@ impl Pallet { sender: &T::CrossAccountId, token_id: TokenId, properties_updates: impl Iterator)>, - mode: SetPropertyMode, nesting_budget: &dyn Budget, ) -> DispatchResult { - let mut is_token_owner = - pallet_common::LazyValue::new(|| -> Result { - if let SetPropertyMode::NewToken { - mint_target_is_sender, - } = mode - { - return Ok(mint_target_is_sender); - } - - let balance = collection.balance(sender.clone(), token_id); - let total_pieces: u128 = - Self::total_pieces(collection.id, token_id).unwrap_or(u128::MAX); - if balance != total_pieces { - return Ok(false); - } - - let is_bundle_owner = >::check_indirectly_owned( - sender.clone(), - collection.id, - token_id, - None, - nesting_budget, - )?; - - Ok(is_bundle_owner) - }); - - let is_new_token = matches!(mode, SetPropertyMode::NewToken { .. 
}); - - let mut is_token_exist = pallet_common::LazyValue::new(|| { - if is_new_token { - debug_assert!(Self::token_exists(collection, token_id)); - true - } else { - Self::token_exists(collection, token_id) - } - }); + let mut property_writer = + pallet_common::property_writer_for_existing_token(collection, sender); - let stored_properties = if is_new_token { - debug_assert!(!>::contains_key(( - collection.id, - token_id - ))); - TokenPropertiesT::new() - } else { - >::get((collection.id, token_id)) - }; - - >::modify_token_properties( - collection, + property_writer.write_token_properties( sender, token_id, - &mut is_token_exist, properties_updates, - stored_properties, - &mut is_token_owner, - |properties| >::set((collection.id, token_id), properties), + nesting_budget, erc::ERC721TokenEvent::TokenChanged { token_id: token_id.into(), } @@ -618,7 +568,6 @@ impl Pallet { sender: &T::CrossAccountId, token_id: TokenId, properties: impl Iterator, - mode: SetPropertyMode, nesting_budget: &dyn Budget, ) -> DispatchResult { Self::modify_token_properties( @@ -626,7 +575,6 @@ impl Pallet { sender, token_id, properties.map(|p| (p.key, Some(p.value))), - mode, nesting_budget, ) } @@ -643,7 +591,6 @@ impl Pallet { sender, token_id, [property].into_iter(), - SetPropertyMode::ExistingToken, nesting_budget, ) } @@ -660,7 +607,6 @@ impl Pallet { sender, token_id, property_keys.into_iter().map(|key| (key, None)), - SetPropertyMode::ExistingToken, nesting_budget, ) } @@ -941,11 +887,15 @@ impl Pallet { // ========= + let mut property_writer = pallet_common::property_writer_for_new_token(collection, sender); + with_transaction(|| { for (i, data) in data.iter().enumerate() { let token_id = first_token_id + i as u32 + 1; >::insert((collection.id, token_id), totals[i]); + let token = TokenId(token_id); + let mut mint_target_is_sender = true; for (user, amount) in data.users.iter() { if *amount == 0 { @@ -955,23 +905,22 @@ impl Pallet { mint_target_is_sender = mint_target_is_sender && sender.conv_eq(user); >::insert((collection.id, token_id, &user), amount); - >::insert((collection.id, &user, TokenId(token_id)), true); + >::insert((collection.id, &user, token), true); >::nest_if_sent_to_token_unchecked( user, collection.id, - TokenId(token_id), + token, ); } - if let Err(e) = Self::set_token_properties( - collection, - sender, - TokenId(token_id), + if let Err(e) = property_writer.write_token_properties( + mint_target_is_sender, + token, data.properties.clone().into_iter(), - SetPropertyMode::NewToken { - mint_target_is_sender, - }, - nesting_budget, + erc::ERC721TokenEvent::TokenChanged { + token_id: token.into(), + } + .to_log(T::ContractAddress::get()), ) { return TransactionOutcome::Rollback(Err(e)); } From 2657b807cead0c98b720975856432a483ff88c29 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Sat, 30 Sep 2023 17:52:57 +0200 Subject: [PATCH 066/143] fix: EREFUSED due to ws://localhost (?) 
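The refused connection is most likely a name-resolution quirk rather than a node
problem: recent Node.js releases no longer reorder DNS results to prefer IPv4, so
`localhost` can resolve to the IPv6 loopback (`::1`) first while the node only
listens on IPv4, and the WebSocket connection is then refused. Using the explicit
IPv4 loopback address `127.0.0.1` avoids the resolution-order issue entirely.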
--- tests/src/migrations/correctStateAfterMaintenance.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/src/migrations/correctStateAfterMaintenance.ts b/tests/src/migrations/correctStateAfterMaintenance.ts index eb0be2c648..4ce5126874 100644 --- a/tests/src/migrations/correctStateAfterMaintenance.ts +++ b/tests/src/migrations/correctStateAfterMaintenance.ts @@ -2,7 +2,7 @@ import {usingPlaygrounds} from '../util'; -const WS_ENDPOINT = 'ws://localhost:9944'; +const WS_ENDPOINT = 'ws://127.0.0.1:9944'; const DONOR_SEED = '//Alice'; export const main = async(options: { wsEndpoint: string; donorSeed: string } = { @@ -66,4 +66,4 @@ export const main = async(options: { wsEndpoint: string; donorSeed: string } = { const chunk = (arr: T[], size: number) => Array.from({length: Math.ceil(arr.length / size)}, (_: any, i: number) => - arr.slice(i * size, i * size + size)); \ No newline at end of file + arr.slice(i * size, i * size + size)); From 0900b3de37ab670ff19aea199574fdf3ae7cd7dd Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Sat, 30 Sep 2023 17:53:35 +0200 Subject: [PATCH 067/143] chore: temporary bench nft/rft/common --- pallets/common/src/weights.rs | 71 ++++--- pallets/nonfungible/src/weights.rs | 250 ++++++++++++----------- pallets/refungible/src/weights.rs | 306 ++++++++++++++--------------- 3 files changed, 320 insertions(+), 307 deletions(-) diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index b0f12e3051..1753b652eb 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/common/src/weights.rs @@ -36,6 +36,7 @@ pub trait WeightInfo { fn set_collection_properties(b: u32, ) -> Weight; fn delete_collection_properties(b: u32, ) -> Weight; fn check_accesslist() -> Weight; + fn init_token_properties_common() -> Weight; } /// Weights for pallet_common using the Substrate node and recommended hardware. @@ -48,10 +49,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_862_000 picoseconds. - Weight::from_parts(5_003_000, 44457) - // Standard Error: 3_889 - .saturating_add(Weight::from_parts(4_918_195, 0).saturating_mul(b.into())) + // Minimum execution time: 2_831_000 picoseconds. + Weight::from_parts(8_612_860, 44457) + // Standard Error: 8_791 + .saturating_add(Weight::from_parts(2_512_039, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -62,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 4_739_000 picoseconds. 
- Weight::from_parts(4_887_000, 44457) - // Standard Error: 37_951 - .saturating_add(Weight::from_parts(23_410_931, 0).saturating_mul(b.into())) + // Minimum execution time: 2_650_000 picoseconds. + Weight::from_parts(2_750_000, 44457) + // Standard Error: 28_392 + .saturating_add(Weight::from_parts(8_897_514, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,10 +76,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_183_000 picoseconds. - Weight::from_parts(4_391_000, 3535) + // Minimum execution time: 2_730_000 picoseconds. + Weight::from_parts(2_850_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } + /// Storage: Common IsAdmin (r:1 w:0) + /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + fn init_token_properties_common() -> Weight { + // Proof Size summary in bytes: + // Measured: `326` + // Estimated: `20191` + // Minimum execution time: 3_770_000 picoseconds. + Weight::from_parts(3_930_000, 20191) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } } // For backwards compatibility and tests @@ -90,10 +103,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_862_000 picoseconds. - Weight::from_parts(5_003_000, 44457) - // Standard Error: 3_889 - .saturating_add(Weight::from_parts(4_918_195, 0).saturating_mul(b.into())) + // Minimum execution time: 2_831_000 picoseconds. + Weight::from_parts(8_612_860, 44457) + // Standard Error: 8_791 + .saturating_add(Weight::from_parts(2_512_039, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -104,10 +117,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 4_739_000 picoseconds. - Weight::from_parts(4_887_000, 44457) - // Standard Error: 37_951 - .saturating_add(Weight::from_parts(23_410_931, 0).saturating_mul(b.into())) + // Minimum execution time: 2_650_000 picoseconds. + Weight::from_parts(2_750_000, 44457) + // Standard Error: 28_392 + .saturating_add(Weight::from_parts(8_897_514, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -117,9 +130,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_183_000 picoseconds. - Weight::from_parts(4_391_000, 3535) + // Minimum execution time: 2_730_000 picoseconds. 
+ Weight::from_parts(2_850_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } + /// Storage: Common IsAdmin (r:1 w:0) + /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: Common CollectionPropertyPermissions (r:1 w:0) + /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + fn init_token_properties_common() -> Weight { + // Proof Size summary in bytes: + // Measured: `326` + // Estimated: `20191` + // Minimum execution time: 3_770_000 picoseconds. + Weight::from_parts(3_930_000, 20191) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } } diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index 95961bbb59..f47b137a05 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/nonfungible/src/weights.rs @@ -46,7 +46,7 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; - fn reset_token_properties(b: u32, ) -> Weight; + fn init_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn token_owner() -> Weight; fn set_allowance_for_all() -> Weight; @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 10_723_000 picoseconds. - Weight::from_parts(11_038_000, 3530) + // Minimum execution time: 4_981_000 picoseconds. + Weight::from_parts(5_160_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,10 +87,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_170_000 picoseconds. - Weight::from_parts(2_230_822, 3530) - // Standard Error: 417 - .saturating_add(Weight::from_parts(3_552_754, 0).saturating_mul(b.into())) + // Minimum execution time: 1_701_000 picoseconds. + Weight::from_parts(1_750_000, 3530) + // Standard Error: 549 + .saturating_add(Weight::from_parts(2_507_305, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -108,10 +108,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_234_000 picoseconds. 
- Weight::from_parts(3_359_000, 3481) - // Standard Error: 598 - .saturating_add(Weight::from_parts(4_872_803, 0).saturating_mul(b.into())) + // Minimum execution time: 1_600_000 picoseconds. + Weight::from_parts(1_661_000, 3481) + // Standard Error: 1_210 + .saturating_add(Weight::from_parts(3_472_463, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -136,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 17_970_000 picoseconds. - Weight::from_parts(18_458_000, 3530) + // Minimum execution time: 10_401_000 picoseconds. + Weight::from_parts(10_701_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 22_995_000 picoseconds. - Weight::from_parts(23_505_000, 3530) + // Minimum execution time: 12_982_000 picoseconds. + Weight::from_parts(13_522_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 22_854_000 picoseconds. - Weight::from_parts(23_291_000, 5874) - // Standard Error: 83_594 - .saturating_add(Weight::from_parts(59_695_924, 0).saturating_mul(b.into())) + // Minimum execution time: 13_201_000 picoseconds. + Weight::from_parts(13_371_000, 5874) + // Standard Error: 124_410 + .saturating_add(Weight::from_parts(41_526_223, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -207,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(14_318_000, 6070) + // Minimum execution time: 8_681_000 picoseconds. + Weight::from_parts(8_891_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 7_895_000 picoseconds. - Weight::from_parts(8_131_000, 3522) + // Minimum execution time: 4_651_000 picoseconds. + Weight::from_parts(4_830_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 7_931_000 picoseconds. - Weight::from_parts(8_185_000, 3522) + // Minimum execution time: 4_721_000 picoseconds. + Weight::from_parts(4_851_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_111_000 picoseconds. 
- Weight::from_parts(4_280_000, 3522) + // Minimum execution time: 2_621_000 picoseconds. + Weight::from_parts(2_760_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 21_756_000 picoseconds. - Weight::from_parts(22_237_000, 3530) + // Minimum execution time: 13_432_000 picoseconds. + Weight::from_parts(13_681_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -278,17 +278,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_411_000 picoseconds. - Weight::from_parts(1_505_000, 20191) - // Standard Error: 14_327 - .saturating_add(Weight::from_parts(8_366_431, 0).saturating_mul(b.into())) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(640_000, 20191) + // Standard Error: 22_744 + .saturating_add(Weight::from_parts(5_938_386, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:1 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:1 w:0) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. @@ -296,44 +296,41 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 3_628_000 picoseconds. - Weight::from_parts(1_438_616, 36269) - // Standard Error: 13_799 - .saturating_add(Weight::from_parts(5_486_048, 0).saturating_mul(b.into())) + // Minimum execution time: 360_000 picoseconds. + Weight::from_parts(3_526_074, 36269) + // Standard Error: 8_803 + .saturating_add(Weight::from_parts(2_784_005, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenProperties (r:0 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn reset_token_properties(b: u32, ) -> Weight { + fn init_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + b * (261 ±0)` - // Estimated: `20191` - // Minimum execution time: 1_016_000 picoseconds. - Weight::from_parts(4_628_460, 20191) - // Standard Error: 23_738 - .saturating_add(Weight::from_parts(5_023_391, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 70_000 picoseconds. 
+ Weight::from_parts(2_216_986, 0) + // Standard Error: 5_264 + .saturating_add(Weight::from_parts(2_408_457, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:1 w:0) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:1 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 3_576_000 picoseconds. - Weight::from_parts(3_709_000, 36269) - // Standard Error: 36_977 - .saturating_add(Weight::from_parts(23_798_574, 0).saturating_mul(b.into())) + // Minimum execution time: 360_000 picoseconds. + Weight::from_parts(380_000, 36269) + // Standard Error: 26_060 + .saturating_add(Weight::from_parts(9_230_365, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,8 +340,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 3_614_000 picoseconds. - Weight::from_parts(3_776_000, 3522) + // Minimum execution time: 2_350_000 picoseconds. + Weight::from_parts(2_450_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -353,8 +350,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_345_000 picoseconds. - Weight::from_parts(4_555_000, 0) + // Minimum execution time: 2_040_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -363,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_810_000 picoseconds. - Weight::from_parts(2_982_000, 3576) + // Minimum execution time: 1_560_000 picoseconds. + Weight::from_parts(1_670_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -373,8 +370,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_169_000, 36269) + // Minimum execution time: 1_710_000 picoseconds. + Weight::from_parts(1_781_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -394,8 +391,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 10_723_000 picoseconds. - Weight::from_parts(11_038_000, 3530) + // Minimum execution time: 4_981_000 picoseconds. 
+ Weight::from_parts(5_160_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -412,10 +409,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_170_000 picoseconds. - Weight::from_parts(2_230_822, 3530) - // Standard Error: 417 - .saturating_add(Weight::from_parts(3_552_754, 0).saturating_mul(b.into())) + // Minimum execution time: 1_701_000 picoseconds. + Weight::from_parts(1_750_000, 3530) + // Standard Error: 549 + .saturating_add(Weight::from_parts(2_507_305, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -433,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_234_000 picoseconds. - Weight::from_parts(3_359_000, 3481) - // Standard Error: 598 - .saturating_add(Weight::from_parts(4_872_803, 0).saturating_mul(b.into())) + // Minimum execution time: 1_600_000 picoseconds. + Weight::from_parts(1_661_000, 3481) + // Standard Error: 1_210 + .saturating_add(Weight::from_parts(3_472_463, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -461,8 +458,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 17_970_000 picoseconds. - Weight::from_parts(18_458_000, 3530) + // Minimum execution time: 10_401_000 picoseconds. + Weight::from_parts(10_701_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -484,8 +481,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 22_995_000 picoseconds. - Weight::from_parts(23_505_000, 3530) + // Minimum execution time: 12_982_000 picoseconds. + Weight::from_parts(13_522_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -510,10 +507,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 22_854_000 picoseconds. - Weight::from_parts(23_291_000, 5874) - // Standard Error: 83_594 - .saturating_add(Weight::from_parts(59_695_924, 0).saturating_mul(b.into())) + // Minimum execution time: 13_201_000 picoseconds. + Weight::from_parts(13_371_000, 5874) + // Standard Error: 124_410 + .saturating_add(Weight::from_parts(41_526_223, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -532,8 +529,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(14_318_000, 6070) + // Minimum execution time: 8_681_000 picoseconds. 
+ Weight::from_parts(8_891_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -545,8 +542,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 7_895_000 picoseconds. - Weight::from_parts(8_131_000, 3522) + // Minimum execution time: 4_651_000 picoseconds. + Weight::from_parts(4_830_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -558,8 +555,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 7_931_000 picoseconds. - Weight::from_parts(8_185_000, 3522) + // Minimum execution time: 4_721_000 picoseconds. + Weight::from_parts(4_851_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -569,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_111_000 picoseconds. - Weight::from_parts(4_280_000, 3522) + // Minimum execution time: 2_621_000 picoseconds. + Weight::from_parts(2_760_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -591,8 +588,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 21_756_000 picoseconds. - Weight::from_parts(22_237_000, 3530) + // Minimum execution time: 13_432_000 picoseconds. + Weight::from_parts(13_681_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -603,17 +600,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_411_000 picoseconds. - Weight::from_parts(1_505_000, 20191) - // Standard Error: 14_327 - .saturating_add(Weight::from_parts(8_366_431, 0).saturating_mul(b.into())) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(640_000, 20191) + // Standard Error: 22_744 + .saturating_add(Weight::from_parts(5_938_386, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:1 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:1 w:0) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. @@ -621,44 +618,41 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 3_628_000 picoseconds. - Weight::from_parts(1_438_616, 36269) - // Standard Error: 13_799 - .saturating_add(Weight::from_parts(5_486_048, 0).saturating_mul(b.into())) + // Minimum execution time: 360_000 picoseconds. 
+ Weight::from_parts(3_526_074, 36269) + // Standard Error: 8_803 + .saturating_add(Weight::from_parts(2_784_005, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenProperties (r:0 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn reset_token_properties(b: u32, ) -> Weight { + fn init_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + b * (261 ±0)` - // Estimated: `20191` - // Minimum execution time: 1_016_000 picoseconds. - Weight::from_parts(4_628_460, 20191) - // Standard Error: 23_738 - .saturating_add(Weight::from_parts(5_023_391, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 70_000 picoseconds. + Weight::from_parts(2_216_986, 0) + // Standard Error: 5_264 + .saturating_add(Weight::from_parts(2_408_457, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Nonfungible TokenData (r:1 w:0) /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: Nonfungible TokenProperties (r:1 w:1) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 3_576_000 picoseconds. - Weight::from_parts(3_709_000, 36269) - // Standard Error: 36_977 - .saturating_add(Weight::from_parts(23_798_574, 0).saturating_mul(b.into())) + // Minimum execution time: 360_000 picoseconds. + Weight::from_parts(380_000, 36269) + // Standard Error: 26_060 + .saturating_add(Weight::from_parts(9_230_365, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -668,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 3_614_000 picoseconds. - Weight::from_parts(3_776_000, 3522) + // Minimum execution time: 2_350_000 picoseconds. + Weight::from_parts(2_450_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -678,8 +672,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_345_000 picoseconds. - Weight::from_parts(4_555_000, 0) + // Minimum execution time: 2_040_000 picoseconds. 
+ Weight::from_parts(2_150_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -688,8 +682,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_810_000 picoseconds. - Weight::from_parts(2_982_000, 3576) + // Minimum execution time: 1_560_000 picoseconds. + Weight::from_parts(1_670_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -698,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_169_000, 36269) + // Minimum execution time: 1_710_000 picoseconds. + Weight::from_parts(1_781_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index a892487c26..35c61cd06d 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/refungible/src/weights.rs @@ -52,7 +52,7 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; - fn reset_token_properties(b: u32, ) -> Weight; + fn init_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn repartition_item() -> Weight; fn token_owner() -> Weight; @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 12_168_000 picoseconds. - Weight::from_parts(12_531_000, 3530) + // Minimum execution time: 5_410_000 picoseconds. + Weight::from_parts(5_630_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -98,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 2_583_000 picoseconds. - Weight::from_parts(2_726_000, 3530) - // Standard Error: 1_051 - .saturating_add(Weight::from_parts(4_727_433, 0).saturating_mul(b.into())) + // Minimum execution time: 1_210_000 picoseconds. 
+ Weight::from_parts(1_280_000, 3530) + // Standard Error: 1_172 + .saturating_add(Weight::from_parts(3_145_627, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -121,10 +121,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_565_000 picoseconds. - Weight::from_parts(2_710_000, 3481) - // Standard Error: 862 - .saturating_add(Weight::from_parts(5_951_711, 0).saturating_mul(b.into())) + // Minimum execution time: 1_230_000 picoseconds. + Weight::from_parts(1_310_000, 3481) + // Standard Error: 1_385 + .saturating_add(Weight::from_parts(3_942_825, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -146,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 4_281_000 picoseconds. - Weight::from_parts(3_252_037, 3481) - // Standard Error: 568 - .saturating_add(Weight::from_parts(4_380_356, 0).saturating_mul(b.into())) + // Minimum execution time: 1_610_000 picoseconds. + Weight::from_parts(1_650_000, 3481) + // Standard Error: 1_090 + .saturating_add(Weight::from_parts(3_125_098, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -168,8 +168,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 22_354_000 picoseconds. - Weight::from_parts(22_787_000, 8682) + // Minimum execution time: 12_180_000 picoseconds. + Weight::from_parts(12_580_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 21_689_000 picoseconds. - Weight::from_parts(22_199_000, 3554) + // Minimum execution time: 12_230_000 picoseconds. + Weight::from_parts(12_550_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -202,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 13_673_000 picoseconds. - Weight::from_parts(14_067_000, 6118) + // Minimum execution time: 7_630_000 picoseconds. + Weight::from_parts(7_919_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 15_863_000 picoseconds. - Weight::from_parts(16_197_000, 6118) + // Minimum execution time: 8_840_000 picoseconds. 
+ Weight::from_parts(9_080_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 18_123_000 picoseconds. - Weight::from_parts(18_501_000, 6118) + // Minimum execution time: 10_270_000 picoseconds. + Weight::from_parts(10_670_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 17_966_000 picoseconds. - Weight::from_parts(18_305_000, 6118) + // Minimum execution time: 10_199_000 picoseconds. + Weight::from_parts(10_479_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 8_636_000 picoseconds. - Weight::from_parts(8_882_000, 3554) + // Minimum execution time: 4_750_000 picoseconds. + Weight::from_parts(4_910_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,8 +279,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 8_756_000 picoseconds. - Weight::from_parts(8_978_000, 3554) + // Minimum execution time: 4_700_000 picoseconds. + Weight::from_parts(4_840_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 19_417_000 picoseconds. - Weight::from_parts(19_945_000, 6118) + // Minimum execution time: 11_419_000 picoseconds. + Weight::from_parts(11_800_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -313,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 21_425_000 picoseconds. - Weight::from_parts(21_829_000, 6118) + // Minimum execution time: 12_440_000 picoseconds. + Weight::from_parts(12_740_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -332,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 23_854_000 picoseconds. - Weight::from_parts(24_352_000, 6118) + // Minimum execution time: 14_030_000 picoseconds. + Weight::from_parts(14_380_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -351,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 23_775_000 picoseconds. - Weight::from_parts(24_236_000, 6118) + // Minimum execution time: 14_200_000 picoseconds. 
+ Weight::from_parts(14_560_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -374,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 27_885_000 picoseconds. - Weight::from_parts(28_492_000, 3570) + // Minimum execution time: 16_590_000 picoseconds. + Weight::from_parts(17_079_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -386,17 +386,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_299_000 picoseconds. - Weight::from_parts(1_410_000, 20191) - // Standard Error: 14_247 - .saturating_add(Weight::from_parts(8_221_449, 0).saturating_mul(b.into())) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(590_000, 20191) + // Standard Error: 20_539 + .saturating_add(Weight::from_parts(5_317_277, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:1 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:1 w:0) /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. @@ -404,44 +404,41 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 2_717_000 picoseconds. - Weight::from_parts(6_076_231, 36269) - // Standard Error: 10_349 - .saturating_add(Weight::from_parts(4_950_943, 0).saturating_mul(b.into())) + // Minimum execution time: 340_000 picoseconds. + Weight::from_parts(4_825_296, 36269) + // Standard Error: 4_846 + .saturating_add(Weight::from_parts(2_632_821, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible TokenProperties (r:0 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn reset_token_properties(b: u32, ) -> Weight { + fn init_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + b * (261 ±0)` - // Estimated: `20191` - // Minimum execution time: 936_000 picoseconds. - Weight::from_parts(1_015_000, 20191) - // Standard Error: 9_051 - .saturating_add(Weight::from_parts(5_454_224, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 70_000 picoseconds. 
+ Weight::from_parts(70_000, 0) + // Standard Error: 2_858 + .saturating_add(Weight::from_parts(2_259_760, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:1 w:0) /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:1 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 2_707_000 picoseconds. - Weight::from_parts(2_851_000, 36269) - // Standard Error: 36_568 - .saturating_add(Weight::from_parts(23_557_445, 0).saturating_mul(b.into())) + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(350_000, 36269) + // Standard Error: 24_256 + .saturating_add(Weight::from_parts(8_471_382, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -453,8 +450,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 10_556_000 picoseconds. - Weight::from_parts(10_904_000, 3554) + // Minimum execution time: 5_600_000 picoseconds. + Weight::from_parts(5_850_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -464,8 +461,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 4_708_000 picoseconds. - Weight::from_parts(4_974_000, 6118) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_590_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -474,8 +471,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_332_000 picoseconds. - Weight::from_parts(4_506_000, 0) + // Minimum execution time: 1_970_000 picoseconds. + Weight::from_parts(2_050_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -484,8 +481,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_207_000 picoseconds. - Weight::from_parts(2_358_000, 3576) + // Minimum execution time: 1_140_000 picoseconds. + Weight::from_parts(1_240_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -494,8 +491,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_230_000 picoseconds. - Weight::from_parts(2_337_000, 36269) + // Minimum execution time: 1_000_000 picoseconds. 
+ Weight::from_parts(1_060_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -517,8 +514,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 12_168_000 picoseconds. - Weight::from_parts(12_531_000, 3530) + // Minimum execution time: 5_410_000 picoseconds. + Weight::from_parts(5_630_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -537,10 +534,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 2_583_000 picoseconds. - Weight::from_parts(2_726_000, 3530) - // Standard Error: 1_051 - .saturating_add(Weight::from_parts(4_727_433, 0).saturating_mul(b.into())) + // Minimum execution time: 1_210_000 picoseconds. + Weight::from_parts(1_280_000, 3530) + // Standard Error: 1_172 + .saturating_add(Weight::from_parts(3_145_627, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -560,10 +557,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_565_000 picoseconds. - Weight::from_parts(2_710_000, 3481) - // Standard Error: 862 - .saturating_add(Weight::from_parts(5_951_711, 0).saturating_mul(b.into())) + // Minimum execution time: 1_230_000 picoseconds. + Weight::from_parts(1_310_000, 3481) + // Standard Error: 1_385 + .saturating_add(Weight::from_parts(3_942_825, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -585,10 +582,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 4_281_000 picoseconds. - Weight::from_parts(3_252_037, 3481) - // Standard Error: 568 - .saturating_add(Weight::from_parts(4_380_356, 0).saturating_mul(b.into())) + // Minimum execution time: 1_610_000 picoseconds. + Weight::from_parts(1_650_000, 3481) + // Standard Error: 1_090 + .saturating_add(Weight::from_parts(3_125_098, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -607,8 +604,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 22_354_000 picoseconds. - Weight::from_parts(22_787_000, 8682) + // Minimum execution time: 12_180_000 picoseconds. + Weight::from_parts(12_580_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -628,8 +625,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 21_689_000 picoseconds. - Weight::from_parts(22_199_000, 3554) + // Minimum execution time: 12_230_000 picoseconds. 
+ Weight::from_parts(12_550_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -641,8 +638,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 13_673_000 picoseconds. - Weight::from_parts(14_067_000, 6118) + // Minimum execution time: 7_630_000 picoseconds. + Weight::from_parts(7_919_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -658,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 15_863_000 picoseconds. - Weight::from_parts(16_197_000, 6118) + // Minimum execution time: 8_840_000 picoseconds. + Weight::from_parts(9_080_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -675,8 +672,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 18_123_000 picoseconds. - Weight::from_parts(18_501_000, 6118) + // Minimum execution time: 10_270_000 picoseconds. + Weight::from_parts(10_670_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -692,8 +689,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 17_966_000 picoseconds. - Weight::from_parts(18_305_000, 6118) + // Minimum execution time: 10_199_000 picoseconds. + Weight::from_parts(10_479_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -705,8 +702,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 8_636_000 picoseconds. - Weight::from_parts(8_882_000, 3554) + // Minimum execution time: 4_750_000 picoseconds. + Weight::from_parts(4_910_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -718,8 +715,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 8_756_000 picoseconds. - Weight::from_parts(8_978_000, 3554) + // Minimum execution time: 4_700_000 picoseconds. + Weight::from_parts(4_840_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -733,8 +730,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 19_417_000 picoseconds. - Weight::from_parts(19_945_000, 6118) + // Minimum execution time: 11_419_000 picoseconds. + Weight::from_parts(11_800_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -752,8 +749,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 21_425_000 picoseconds. - Weight::from_parts(21_829_000, 6118) + // Minimum execution time: 12_440_000 picoseconds. + Weight::from_parts(12_740_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -771,8 +768,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 23_854_000 picoseconds. 
- Weight::from_parts(24_352_000, 6118) + // Minimum execution time: 14_030_000 picoseconds. + Weight::from_parts(14_380_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -790,8 +787,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 23_775_000 picoseconds. - Weight::from_parts(24_236_000, 6118) + // Minimum execution time: 14_200_000 picoseconds. + Weight::from_parts(14_560_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -813,8 +810,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 27_885_000 picoseconds. - Weight::from_parts(28_492_000, 3570) + // Minimum execution time: 16_590_000 picoseconds. + Weight::from_parts(17_079_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -825,17 +822,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_299_000 picoseconds. - Weight::from_parts(1_410_000, 20191) - // Standard Error: 14_247 - .saturating_add(Weight::from_parts(8_221_449, 0).saturating_mul(b.into())) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(590_000, 20191) + // Standard Error: 20_539 + .saturating_add(Weight::from_parts(5_317_277, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:1 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:1 w:0) /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. @@ -843,44 +840,41 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 2_717_000 picoseconds. - Weight::from_parts(6_076_231, 36269) - // Standard Error: 10_349 - .saturating_add(Weight::from_parts(4_950_943, 0).saturating_mul(b.into())) + // Minimum execution time: 340_000 picoseconds. + Weight::from_parts(4_825_296, 36269) + // Standard Error: 4_846 + .saturating_add(Weight::from_parts(2_632_821, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible TokenProperties (r:0 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. 
- fn reset_token_properties(b: u32, ) -> Weight { + fn init_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + b * (261 ±0)` - // Estimated: `20191` - // Minimum execution time: 936_000 picoseconds. - Weight::from_parts(1_015_000, 20191) - // Standard Error: 9_051 - .saturating_add(Weight::from_parts(5_454_224, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 70_000 picoseconds. + Weight::from_parts(70_000, 0) + // Standard Error: 2_858 + .saturating_add(Weight::from_parts(2_259_760, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) /// Storage: Refungible TotalSupply (r:1 w:0) /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: Refungible TokenProperties (r:1 w:1) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. fn delete_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 2_707_000 picoseconds. - Weight::from_parts(2_851_000, 36269) - // Standard Error: 36_568 - .saturating_add(Weight::from_parts(23_557_445, 0).saturating_mul(b.into())) + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(350_000, 36269) + // Standard Error: 24_256 + .saturating_add(Weight::from_parts(8_471_382, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -892,8 +886,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 10_556_000 picoseconds. - Weight::from_parts(10_904_000, 3554) + // Minimum execution time: 5_600_000 picoseconds. + Weight::from_parts(5_850_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -903,8 +897,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 4_708_000 picoseconds. - Weight::from_parts(4_974_000, 6118) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_590_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -913,8 +907,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_332_000 picoseconds. - Weight::from_parts(4_506_000, 0) + // Minimum execution time: 1_970_000 picoseconds. + Weight::from_parts(2_050_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -923,8 +917,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_207_000 picoseconds. - Weight::from_parts(2_358_000, 3576) + // Minimum execution time: 1_140_000 picoseconds. 
+ Weight::from_parts(1_240_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -933,8 +927,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_230_000 picoseconds. - Weight::from_parts(2_337_000, 36269) + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_060_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } From 42bd0c6b380a9c4ffbb8f1d2948fde06603a2886 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Sat, 30 Sep 2023 19:48:10 +0200 Subject: [PATCH 068/143] chore: bench nft/rft/common --- pallets/common/src/weights.rs | 54 +++--- pallets/nonfungible/src/weights.rs | 214 +++++++++++------------ pallets/refungible/src/weights.rs | 270 ++++++++++++++--------------- 3 files changed, 269 insertions(+), 269 deletions(-) diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 1753b652eb..6dfaa8fd55 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/common/src/weights.rs @@ -49,10 +49,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 2_831_000 picoseconds. - Weight::from_parts(8_612_860, 44457) - // Standard Error: 8_791 - .saturating_add(Weight::from_parts(2_512_039, 0).saturating_mul(b.into())) + // Minimum execution time: 4_987_000 picoseconds. + Weight::from_parts(5_119_000, 44457) + // Standard Error: 7_609 + .saturating_add(Weight::from_parts(5_750_459, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -63,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 2_650_000 picoseconds. - Weight::from_parts(2_750_000, 44457) - // Standard Error: 28_392 - .saturating_add(Weight::from_parts(8_897_514, 0).saturating_mul(b.into())) + // Minimum execution time: 4_923_000 picoseconds. + Weight::from_parts(5_074_000, 44457) + // Standard Error: 36_651 + .saturating_add(Weight::from_parts(23_145_677, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -76,8 +76,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 2_730_000 picoseconds. - Weight::from_parts(2_850_000, 3535) + // Minimum execution time: 4_271_000 picoseconds. 
+ Weight::from_parts(4_461_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Common IsAdmin (r:1 w:0) @@ -88,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 3_770_000 picoseconds. - Weight::from_parts(3_930_000, 20191) + // Minimum execution time: 5_889_000 picoseconds. + Weight::from_parts(6_138_000, 20191) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -103,10 +103,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 2_831_000 picoseconds. - Weight::from_parts(8_612_860, 44457) - // Standard Error: 8_791 - .saturating_add(Weight::from_parts(2_512_039, 0).saturating_mul(b.into())) + // Minimum execution time: 4_987_000 picoseconds. + Weight::from_parts(5_119_000, 44457) + // Standard Error: 7_609 + .saturating_add(Weight::from_parts(5_750_459, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -117,10 +117,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 2_650_000 picoseconds. - Weight::from_parts(2_750_000, 44457) - // Standard Error: 28_392 - .saturating_add(Weight::from_parts(8_897_514, 0).saturating_mul(b.into())) + // Minimum execution time: 4_923_000 picoseconds. + Weight::from_parts(5_074_000, 44457) + // Standard Error: 36_651 + .saturating_add(Weight::from_parts(23_145_677, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -130,8 +130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 2_730_000 picoseconds. - Weight::from_parts(2_850_000, 3535) + // Minimum execution time: 4_271_000 picoseconds. + Weight::from_parts(4_461_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Common IsAdmin (r:1 w:0) @@ -142,8 +142,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 3_770_000 picoseconds. - Weight::from_parts(3_930_000, 20191) + // Minimum execution time: 5_889_000 picoseconds. + Weight::from_parts(6_138_000, 20191) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index f47b137a05..723099a4d8 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/nonfungible/src/weights.rs @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_981_000 picoseconds. - Weight::from_parts(5_160_000, 3530) + // Minimum execution time: 9_726_000 picoseconds. + Weight::from_parts(10_059_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,10 +87,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 1_701_000 picoseconds. - Weight::from_parts(1_750_000, 3530) - // Standard Error: 549 - .saturating_add(Weight::from_parts(2_507_305, 0).saturating_mul(b.into())) + // Minimum execution time: 3_270_000 picoseconds. + Weight::from_parts(3_693_659, 3530) + // Standard Error: 255 + .saturating_add(Weight::from_parts(3_024_284, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -108,10 +108,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_600_000 picoseconds. - Weight::from_parts(1_661_000, 3481) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(3_472_463, 0).saturating_mul(b.into())) + // Minimum execution time: 3_188_000 picoseconds. + Weight::from_parts(3_307_000, 3481) + // Standard Error: 567 + .saturating_add(Weight::from_parts(4_320_449, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -136,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 10_401_000 picoseconds. - Weight::from_parts(10_701_000, 3530) + // Minimum execution time: 18_062_000 picoseconds. + Weight::from_parts(18_433_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,8 +159,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 12_982_000 picoseconds. - Weight::from_parts(13_522_000, 3530) + // Minimum execution time: 22_942_000 picoseconds. + Weight::from_parts(23_527_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 13_201_000 picoseconds. - Weight::from_parts(13_371_000, 5874) - // Standard Error: 124_410 - .saturating_add(Weight::from_parts(41_526_223, 0).saturating_mul(b.into())) + // Minimum execution time: 22_709_000 picoseconds. 
+ Weight::from_parts(23_287_000, 5874) + // Standard Error: 89_471 + .saturating_add(Weight::from_parts(63_285_201, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -207,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 8_681_000 picoseconds. - Weight::from_parts(8_891_000, 6070) + // Minimum execution time: 13_652_000 picoseconds. + Weight::from_parts(13_981_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_651_000 picoseconds. - Weight::from_parts(4_830_000, 3522) + // Minimum execution time: 7_837_000 picoseconds. + Weight::from_parts(8_113_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 4_721_000 picoseconds. - Weight::from_parts(4_851_000, 3522) + // Minimum execution time: 7_769_000 picoseconds. + Weight::from_parts(7_979_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_621_000 picoseconds. - Weight::from_parts(2_760_000, 3522) + // Minimum execution time: 4_194_000 picoseconds. + Weight::from_parts(4_353_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 13_432_000 picoseconds. - Weight::from_parts(13_681_000, 3530) + // Minimum execution time: 21_978_000 picoseconds. + Weight::from_parts(22_519_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -278,10 +278,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 610_000 picoseconds. - Weight::from_parts(640_000, 20191) - // Standard Error: 22_744 - .saturating_add(Weight::from_parts(5_938_386, 0).saturating_mul(b.into())) + // Minimum execution time: 1_457_000 picoseconds. + Weight::from_parts(1_563_000, 20191) + // Standard Error: 14_041 + .saturating_add(Weight::from_parts(8_452_415, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 360_000 picoseconds. - Weight::from_parts(3_526_074, 36269) - // Standard Error: 8_803 - .saturating_add(Weight::from_parts(2_784_005, 0).saturating_mul(b.into())) + // Minimum execution time: 963_000 picoseconds. 
+ Weight::from_parts(1_126_511, 36269) + // Standard Error: 9_175 + .saturating_add(Weight::from_parts(5_096_011, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -310,10 +310,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(2_216_986, 0) - // Standard Error: 5_264 - .saturating_add(Weight::from_parts(2_408_457, 0).saturating_mul(b.into())) + // Minimum execution time: 194_000 picoseconds. + Weight::from_parts(222_000, 0) + // Standard Error: 7_295 + .saturating_add(Weight::from_parts(4_499_463, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -327,10 +327,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 360_000 picoseconds. - Weight::from_parts(380_000, 36269) - // Standard Error: 26_060 - .saturating_add(Weight::from_parts(9_230_365, 0).saturating_mul(b.into())) + // Minimum execution time: 992_000 picoseconds. + Weight::from_parts(1_043_000, 36269) + // Standard Error: 37_370 + .saturating_add(Weight::from_parts(23_672_870, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -340,8 +340,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_350_000 picoseconds. - Weight::from_parts(2_450_000, 3522) + // Minimum execution time: 3_743_000 picoseconds. + Weight::from_parts(3_908_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -350,8 +350,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_040_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 4_106_000 picoseconds. + Weight::from_parts(4_293_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -360,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_560_000 picoseconds. - Weight::from_parts(1_670_000, 3576) + // Minimum execution time: 2_775_000 picoseconds. + Weight::from_parts(2_923_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -370,8 +370,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(1_781_000, 36269) + // Minimum execution time: 3_033_000 picoseconds. + Weight::from_parts(3_174_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -391,8 +391,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_981_000 picoseconds. - Weight::from_parts(5_160_000, 3530) + // Minimum execution time: 9_726_000 picoseconds. 
+ Weight::from_parts(10_059_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -409,10 +409,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 1_701_000 picoseconds. - Weight::from_parts(1_750_000, 3530) - // Standard Error: 549 - .saturating_add(Weight::from_parts(2_507_305, 0).saturating_mul(b.into())) + // Minimum execution time: 3_270_000 picoseconds. + Weight::from_parts(3_693_659, 3530) + // Standard Error: 255 + .saturating_add(Weight::from_parts(3_024_284, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -430,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_600_000 picoseconds. - Weight::from_parts(1_661_000, 3481) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(3_472_463, 0).saturating_mul(b.into())) + // Minimum execution time: 3_188_000 picoseconds. + Weight::from_parts(3_307_000, 3481) + // Standard Error: 567 + .saturating_add(Weight::from_parts(4_320_449, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -458,8 +458,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 10_401_000 picoseconds. - Weight::from_parts(10_701_000, 3530) + // Minimum execution time: 18_062_000 picoseconds. + Weight::from_parts(18_433_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -481,8 +481,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 12_982_000 picoseconds. - Weight::from_parts(13_522_000, 3530) + // Minimum execution time: 22_942_000 picoseconds. + Weight::from_parts(23_527_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -507,10 +507,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 13_201_000 picoseconds. - Weight::from_parts(13_371_000, 5874) - // Standard Error: 124_410 - .saturating_add(Weight::from_parts(41_526_223, 0).saturating_mul(b.into())) + // Minimum execution time: 22_709_000 picoseconds. + Weight::from_parts(23_287_000, 5874) + // Standard Error: 89_471 + .saturating_add(Weight::from_parts(63_285_201, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -529,8 +529,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 8_681_000 picoseconds. - Weight::from_parts(8_891_000, 6070) + // Minimum execution time: 13_652_000 picoseconds. 
+ Weight::from_parts(13_981_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -542,8 +542,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_651_000 picoseconds. - Weight::from_parts(4_830_000, 3522) + // Minimum execution time: 7_837_000 picoseconds. + Weight::from_parts(8_113_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -555,8 +555,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 4_721_000 picoseconds. - Weight::from_parts(4_851_000, 3522) + // Minimum execution time: 7_769_000 picoseconds. + Weight::from_parts(7_979_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -566,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_621_000 picoseconds. - Weight::from_parts(2_760_000, 3522) + // Minimum execution time: 4_194_000 picoseconds. + Weight::from_parts(4_353_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -588,8 +588,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 13_432_000 picoseconds. - Weight::from_parts(13_681_000, 3530) + // Minimum execution time: 21_978_000 picoseconds. + Weight::from_parts(22_519_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -600,10 +600,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 610_000 picoseconds. - Weight::from_parts(640_000, 20191) - // Standard Error: 22_744 - .saturating_add(Weight::from_parts(5_938_386, 0).saturating_mul(b.into())) + // Minimum execution time: 1_457_000 picoseconds. + Weight::from_parts(1_563_000, 20191) + // Standard Error: 14_041 + .saturating_add(Weight::from_parts(8_452_415, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -618,10 +618,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 360_000 picoseconds. - Weight::from_parts(3_526_074, 36269) - // Standard Error: 8_803 - .saturating_add(Weight::from_parts(2_784_005, 0).saturating_mul(b.into())) + // Minimum execution time: 963_000 picoseconds. + Weight::from_parts(1_126_511, 36269) + // Standard Error: 9_175 + .saturating_add(Weight::from_parts(5_096_011, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -632,10 +632,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(2_216_986, 0) - // Standard Error: 5_264 - .saturating_add(Weight::from_parts(2_408_457, 0).saturating_mul(b.into())) + // Minimum execution time: 194_000 picoseconds. 
+ Weight::from_parts(222_000, 0) + // Standard Error: 7_295 + .saturating_add(Weight::from_parts(4_499_463, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -649,10 +649,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 360_000 picoseconds. - Weight::from_parts(380_000, 36269) - // Standard Error: 26_060 - .saturating_add(Weight::from_parts(9_230_365, 0).saturating_mul(b.into())) + // Minimum execution time: 992_000 picoseconds. + Weight::from_parts(1_043_000, 36269) + // Standard Error: 37_370 + .saturating_add(Weight::from_parts(23_672_870, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -662,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 2_350_000 picoseconds. - Weight::from_parts(2_450_000, 3522) + // Minimum execution time: 3_743_000 picoseconds. + Weight::from_parts(3_908_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -672,8 +672,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_040_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 4_106_000 picoseconds. + Weight::from_parts(4_293_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -682,8 +682,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_560_000 picoseconds. - Weight::from_parts(1_670_000, 3576) + // Minimum execution time: 2_775_000 picoseconds. + Weight::from_parts(2_923_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -692,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_710_000 picoseconds. - Weight::from_parts(1_781_000, 36269) + // Minimum execution time: 3_033_000 picoseconds. + Weight::from_parts(3_174_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index 35c61cd06d..9c1c09667a 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=80 +// --repeat=400 // --heap-pages=4096 // --output=./pallets/refungible/src/weights.rs @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 5_410_000 picoseconds. - Weight::from_parts(5_630_000, 3530) + // Minimum execution time: 11_341_000 picoseconds. + Weight::from_parts(11_741_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -98,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_210_000 picoseconds. - Weight::from_parts(1_280_000, 3530) - // Standard Error: 1_172 - .saturating_add(Weight::from_parts(3_145_627, 0).saturating_mul(b.into())) + // Minimum execution time: 2_665_000 picoseconds. + Weight::from_parts(2_791_000, 3530) + // Standard Error: 996 + .saturating_add(Weight::from_parts(4_343_736, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -121,10 +121,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_230_000 picoseconds. - Weight::from_parts(1_310_000, 3481) - // Standard Error: 1_385 - .saturating_add(Weight::from_parts(3_942_825, 0).saturating_mul(b.into())) + // Minimum execution time: 2_616_000 picoseconds. + Weight::from_parts(2_726_000, 3481) + // Standard Error: 665 + .saturating_add(Weight::from_parts(5_554_066, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -146,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_610_000 picoseconds. - Weight::from_parts(1_650_000, 3481) - // Standard Error: 1_090 - .saturating_add(Weight::from_parts(3_125_098, 0).saturating_mul(b.into())) + // Minimum execution time: 3_697_000 picoseconds. + Weight::from_parts(2_136_481, 3481) + // Standard Error: 567 + .saturating_add(Weight::from_parts(4_390_621, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -168,8 +168,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 12_180_000 picoseconds. - Weight::from_parts(12_580_000, 8682) + // Minimum execution time: 22_859_000 picoseconds. + Weight::from_parts(23_295_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 12_230_000 picoseconds. - Weight::from_parts(12_550_000, 3554) + // Minimum execution time: 21_477_000 picoseconds. 
+ Weight::from_parts(22_037_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -202,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 7_630_000 picoseconds. - Weight::from_parts(7_919_000, 6118) + // Minimum execution time: 13_714_000 picoseconds. + Weight::from_parts(14_050_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 8_840_000 picoseconds. - Weight::from_parts(9_080_000, 6118) + // Minimum execution time: 15_879_000 picoseconds. + Weight::from_parts(16_266_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 10_270_000 picoseconds. - Weight::from_parts(10_670_000, 6118) + // Minimum execution time: 18_186_000 picoseconds. + Weight::from_parts(18_682_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_479_000, 6118) + // Minimum execution time: 17_943_000 picoseconds. + Weight::from_parts(18_333_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 4_750_000 picoseconds. - Weight::from_parts(4_910_000, 3554) + // Minimum execution time: 8_391_000 picoseconds. + Weight::from_parts(8_637_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,8 +279,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 4_700_000 picoseconds. - Weight::from_parts(4_840_000, 3554) + // Minimum execution time: 8_519_000 picoseconds. + Weight::from_parts(8_760_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 11_419_000 picoseconds. - Weight::from_parts(11_800_000, 6118) + // Minimum execution time: 19_554_000 picoseconds. + Weight::from_parts(20_031_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -313,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 12_440_000 picoseconds. - Weight::from_parts(12_740_000, 6118) + // Minimum execution time: 21_338_000 picoseconds. 
+ Weight::from_parts(21_803_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -332,8 +332,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 14_030_000 picoseconds. - Weight::from_parts(14_380_000, 6118) + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(24_647_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -351,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 14_200_000 picoseconds. - Weight::from_parts(14_560_000, 6118) + // Minimum execution time: 24_008_000 picoseconds. + Weight::from_parts(24_545_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -374,8 +374,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(17_079_000, 3570) + // Minimum execution time: 27_907_000 picoseconds. + Weight::from_parts(28_489_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -386,10 +386,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(590_000, 20191) - // Standard Error: 20_539 - .saturating_add(Weight::from_parts(5_317_277, 0).saturating_mul(b.into())) + // Minimum execution time: 1_460_000 picoseconds. + Weight::from_parts(1_564_000, 20191) + // Standard Error: 14_117 + .saturating_add(Weight::from_parts(8_196_214, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -404,10 +404,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(4_825_296, 36269) - // Standard Error: 4_846 - .saturating_add(Weight::from_parts(2_632_821, 0).saturating_mul(b.into())) + // Minimum execution time: 1_012_000 picoseconds. + Weight::from_parts(1_081_000, 36269) + // Standard Error: 6_838 + .saturating_add(Weight::from_parts(5_801_181, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -418,10 +418,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(70_000, 0) - // Standard Error: 2_858 - .saturating_add(Weight::from_parts(2_259_760, 0).saturating_mul(b.into())) + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(253_000, 0) + // Standard Error: 100_218 + .saturating_add(Weight::from_parts(12_632_221, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -435,10 +435,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 320_000 picoseconds. 
- Weight::from_parts(350_000, 36269) - // Standard Error: 24_256 - .saturating_add(Weight::from_parts(8_471_382, 0).saturating_mul(b.into())) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_065_000, 36269) + // Standard Error: 39_536 + .saturating_add(Weight::from_parts(24_125_838, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -450,8 +450,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 5_600_000 picoseconds. - Weight::from_parts(5_850_000, 3554) + // Minimum execution time: 10_315_000 picoseconds. + Weight::from_parts(10_601_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -461,8 +461,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 2_430_000 picoseconds. - Weight::from_parts(2_590_000, 6118) + // Minimum execution time: 4_898_000 picoseconds. + Weight::from_parts(5_136_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -471,8 +471,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_970_000 picoseconds. - Weight::from_parts(2_050_000, 0) + // Minimum execution time: 4_146_000 picoseconds. + Weight::from_parts(4_337_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -481,8 +481,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_140_000 picoseconds. - Weight::from_parts(1_240_000, 3576) + // Minimum execution time: 2_170_000 picoseconds. + Weight::from_parts(2_301_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -491,8 +491,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_060_000, 36269) + // Minimum execution time: 2_098_000 picoseconds. + Weight::from_parts(2_251_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -514,8 +514,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 5_410_000 picoseconds. - Weight::from_parts(5_630_000, 3530) + // Minimum execution time: 11_341_000 picoseconds. + Weight::from_parts(11_741_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -534,10 +534,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_210_000 picoseconds. - Weight::from_parts(1_280_000, 3530) - // Standard Error: 1_172 - .saturating_add(Weight::from_parts(3_145_627, 0).saturating_mul(b.into())) + // Minimum execution time: 2_665_000 picoseconds. 
+ Weight::from_parts(2_791_000, 3530) + // Standard Error: 996 + .saturating_add(Weight::from_parts(4_343_736, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -557,10 +557,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_230_000 picoseconds. - Weight::from_parts(1_310_000, 3481) - // Standard Error: 1_385 - .saturating_add(Weight::from_parts(3_942_825, 0).saturating_mul(b.into())) + // Minimum execution time: 2_616_000 picoseconds. + Weight::from_parts(2_726_000, 3481) + // Standard Error: 665 + .saturating_add(Weight::from_parts(5_554_066, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -582,10 +582,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_610_000 picoseconds. - Weight::from_parts(1_650_000, 3481) - // Standard Error: 1_090 - .saturating_add(Weight::from_parts(3_125_098, 0).saturating_mul(b.into())) + // Minimum execution time: 3_697_000 picoseconds. + Weight::from_parts(2_136_481, 3481) + // Standard Error: 567 + .saturating_add(Weight::from_parts(4_390_621, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -604,8 +604,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 12_180_000 picoseconds. - Weight::from_parts(12_580_000, 8682) + // Minimum execution time: 22_859_000 picoseconds. + Weight::from_parts(23_295_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -625,8 +625,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 12_230_000 picoseconds. - Weight::from_parts(12_550_000, 3554) + // Minimum execution time: 21_477_000 picoseconds. + Weight::from_parts(22_037_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -638,8 +638,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 7_630_000 picoseconds. - Weight::from_parts(7_919_000, 6118) + // Minimum execution time: 13_714_000 picoseconds. + Weight::from_parts(14_050_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -655,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 8_840_000 picoseconds. - Weight::from_parts(9_080_000, 6118) + // Minimum execution time: 15_879_000 picoseconds. + Weight::from_parts(16_266_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -672,8 +672,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 10_270_000 picoseconds. 
- Weight::from_parts(10_670_000, 6118) + // Minimum execution time: 18_186_000 picoseconds. + Weight::from_parts(18_682_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -689,8 +689,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_479_000, 6118) + // Minimum execution time: 17_943_000 picoseconds. + Weight::from_parts(18_333_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -702,8 +702,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 4_750_000 picoseconds. - Weight::from_parts(4_910_000, 3554) + // Minimum execution time: 8_391_000 picoseconds. + Weight::from_parts(8_637_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -715,8 +715,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 4_700_000 picoseconds. - Weight::from_parts(4_840_000, 3554) + // Minimum execution time: 8_519_000 picoseconds. + Weight::from_parts(8_760_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -730,8 +730,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 11_419_000 picoseconds. - Weight::from_parts(11_800_000, 6118) + // Minimum execution time: 19_554_000 picoseconds. + Weight::from_parts(20_031_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -749,8 +749,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 12_440_000 picoseconds. - Weight::from_parts(12_740_000, 6118) + // Minimum execution time: 21_338_000 picoseconds. + Weight::from_parts(21_803_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -768,8 +768,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 14_030_000 picoseconds. - Weight::from_parts(14_380_000, 6118) + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(24_647_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -787,8 +787,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 14_200_000 picoseconds. - Weight::from_parts(14_560_000, 6118) + // Minimum execution time: 24_008_000 picoseconds. + Weight::from_parts(24_545_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -810,8 +810,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(17_079_000, 3570) + // Minimum execution time: 27_907_000 picoseconds. 
+ Weight::from_parts(28_489_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -822,10 +822,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(590_000, 20191) - // Standard Error: 20_539 - .saturating_add(Weight::from_parts(5_317_277, 0).saturating_mul(b.into())) + // Minimum execution time: 1_460_000 picoseconds. + Weight::from_parts(1_564_000, 20191) + // Standard Error: 14_117 + .saturating_add(Weight::from_parts(8_196_214, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -840,10 +840,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(4_825_296, 36269) - // Standard Error: 4_846 - .saturating_add(Weight::from_parts(2_632_821, 0).saturating_mul(b.into())) + // Minimum execution time: 1_012_000 picoseconds. + Weight::from_parts(1_081_000, 36269) + // Standard Error: 6_838 + .saturating_add(Weight::from_parts(5_801_181, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -854,10 +854,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(70_000, 0) - // Standard Error: 2_858 - .saturating_add(Weight::from_parts(2_259_760, 0).saturating_mul(b.into())) + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(253_000, 0) + // Standard Error: 100_218 + .saturating_add(Weight::from_parts(12_632_221, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -871,10 +871,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 320_000 picoseconds. - Weight::from_parts(350_000, 36269) - // Standard Error: 24_256 - .saturating_add(Weight::from_parts(8_471_382, 0).saturating_mul(b.into())) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_065_000, 36269) + // Standard Error: 39_536 + .saturating_add(Weight::from_parts(24_125_838, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -886,8 +886,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 5_600_000 picoseconds. - Weight::from_parts(5_850_000, 3554) + // Minimum execution time: 10_315_000 picoseconds. + Weight::from_parts(10_601_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -897,8 +897,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 2_430_000 picoseconds. - Weight::from_parts(2_590_000, 6118) + // Minimum execution time: 4_898_000 picoseconds. 
+ Weight::from_parts(5_136_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -907,8 +907,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_970_000 picoseconds. - Weight::from_parts(2_050_000, 0) + // Minimum execution time: 4_146_000 picoseconds. + Weight::from_parts(4_337_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -917,8 +917,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_140_000 picoseconds. - Weight::from_parts(1_240_000, 3576) + // Minimum execution time: 2_170_000 picoseconds. + Weight::from_parts(2_301_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -927,8 +927,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_060_000, 36269) + // Minimum execution time: 2_098_000 picoseconds. + Weight::from_parts(2_251_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } From 74a28c8df90f82738bd2d5148360274fc77b68a3 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Sat, 30 Sep 2023 20:15:16 +0200 Subject: [PATCH 069/143] chore: calibrate weight2fee --- primitives/common/src/constants.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index b290e82add..390184190d 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -52,10 +52,10 @@ pub const MAX_COLLATORS: u32 = 10; pub const SESSION_LENGTH: BlockNumber = HOURS; // Targeting 0.1 UNQ per transfer -pub const WEIGHT_TO_FEE_COEFF: u64 = /**/77_300_265_101_007_172/**/; +pub const WEIGHT_TO_FEE_COEFF: u64 = /**/77_334_604_063_436_322/**/; // Targeting 0.15 UNQ per transfer via ETH -pub const MIN_GAS_PRICE: u64 = /**/1_920_221_209_483/**/; +pub const MIN_GAS_PRICE: u64 = /**/1_920_639_188_722/**/; /// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
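The two constants above are tuned so that an ordinary balance transfer costs roughly 0.1 UNQ, and about 0.15 UNQ when sent through the Ethereum interface. Below is a minimal sketch of how such a recalibration can be sanity-checked against a locally running dev node by measuring the fee actually charged for a plain transfer. It is illustrative only and assumes the playgrounds helpers used elsewhere in this test suite (usingPlaygrounds, helper.arrange.createAccounts, helper.balance.getSubstrate, helper.balance.transferToSubstrate); names and amounts are placeholders.

import {usingPlaygrounds} from '../util';

const UNQ_DECIMALS = 18n;
const ONE_UNQ = 10n ** UNQ_DECIMALS;

// Measures the fee charged for a plain Substrate balance transfer on a dev node.
// With the calibrated WEIGHT_TO_FEE_COEFF the fee is expected to land near 0.1 UNQ.
async function checkTransferFee() {
  await usingPlaygrounds(async (helper, privateKey) => {
    // Assumed: //Alice is a funded dev account on the local node.
    const donor = await privateKey('//Alice');
    const [sender, receiver] = await helper.arrange.createAccounts([100n, 0n], donor);

    const amount = 10n * ONE_UNQ;
    const balanceBefore = await helper.balance.getSubstrate(sender.address);
    await helper.balance.transferToSubstrate(sender, receiver.address, amount);
    const balanceAfter = await helper.balance.getSubstrate(sender.address);

    // Everything that left the sender beyond the transferred amount is the fee.
    const fee = balanceBefore - balanceAfter - amount;
    console.log(`transfer fee: ${fee} raw units (~${Number(fee) / Number(ONE_UNQ)} UNQ)`);
  });
}

checkTransferFee().catch(console.error);

If the measured fee drifts away from the 0.1 UNQ target after benchmark weights change, WEIGHT_TO_FEE_COEFF is the value to revisit; MIN_GAS_PRICE covers the corresponding 0.15 UNQ target for the Ethereum path.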
From 716b83bf89e3880ecd74ae2a2f07114f5dc8de2c Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 13:32:13 +0200 Subject: [PATCH 070/143] fix: rename makeUnpaidTransactProgram --- tests/src/xcm/xcm.types.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index 630027b9ae..de96deb2ea 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -525,7 +525,7 @@ export class XcmTestHelper { if(variant == 'plain') { const kv = relayForceKV(); return { - program: helper.arrange.makeTransactProgram({ + program: helper.arrange.makeUnpaidTransactProgram({ weightMultiplier: 1, call: kv.call, }), @@ -537,7 +537,7 @@ export class XcmTestHelper { const batchCall = helper.constructApiCall(`api.tx.utility.${variant}`, [[kv0.call, kv1.call]]).method.toHex(); return { - program: helper.arrange.makeTransactProgram({ + program: helper.arrange.makeUnpaidTransactProgram({ weightMultiplier: 2, call: batchCall, }), @@ -591,7 +591,7 @@ export class XcmTestHelper { } return { - program: helper.arrange.makeTransactProgram({ + program: helper.arrange.makeUnpaidTransactProgram({ weightMultiplier: 1, call, }), From a6308d9e801153b4c27ed3d1288a1b3d8b930ad3 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 13:33:03 +0200 Subject: [PATCH 071/143] fix: rename makeUnpaidSudoTransactProgram --- tests/src/util/playgrounds/unique.dev.ts | 2 +- tests/src/xcm/xcm.types.ts | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/src/util/playgrounds/unique.dev.ts b/tests/src/util/playgrounds/unique.dev.ts index 10569d9658..57c54e8227 100644 --- a/tests/src/util/playgrounds/unique.dev.ts +++ b/tests/src/util/playgrounds/unique.dev.ts @@ -981,7 +981,7 @@ export class ArrangeGroup { }; } - makeTransactProgram(info: {weightMultiplier: number, call: string}) { + makeUnpaidSudoTransactProgram(info: {weightMultiplier: number, call: string}) { return { V3: [ { diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index de96deb2ea..a5509afab2 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -525,7 +525,7 @@ export class XcmTestHelper { if(variant == 'plain') { const kv = relayForceKV(); return { - program: helper.arrange.makeUnpaidTransactProgram({ + program: helper.arrange.makeUnpaidSudoTransactProgram({ weightMultiplier: 1, call: kv.call, }), @@ -537,7 +537,7 @@ export class XcmTestHelper { const batchCall = helper.constructApiCall(`api.tx.utility.${variant}`, [[kv0.call, kv1.call]]).method.toHex(); return { - program: helper.arrange.makeUnpaidTransactProgram({ + program: helper.arrange.makeUnpaidSudoTransactProgram({ weightMultiplier: 2, call: batchCall, }), @@ -591,7 +591,7 @@ export class XcmTestHelper { } return { - program: helper.arrange.makeUnpaidTransactProgram({ + program: helper.arrange.makeUnpaidSudoTransactProgram({ weightMultiplier: 1, call, }), From d4981bc1ee11bc7c8116b883a78503e335bf8162 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 14:01:57 +0200 Subject: [PATCH 072/143] fix: explaning comment about SetTopic relay router --- tests/src/xcm/lowLevelXcmQuartz.test.ts | 2 +- tests/src/xcm/lowLevelXcmUnique.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 0bee550650..42c5716897 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -314,7 +314,7 @@ 
describeXCM('[XCMLL] Integration test: The relay can do some root ops', () => { // At the moment there is no reliable way // to establish the correspondence between the `ExecutedDownward` event // and the relay's sent message due to `SetTopic` instruction - // containing an unpredictable topic silently added by the relay on the router level. + // containing an unpredictable topic silently added by the relay's messages on the router level. // This changes the message hash on arrival to our chain. // // See: diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 1cbafd4639..0a8f657b05 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -378,7 +378,7 @@ describeXCM('[XCMLL] Integration test: The relay can do some root ops', () => { // At the moment there is no reliable way // to establish the correspondence between the `ExecutedDownward` event // and the relay's sent message due to `SetTopic` instruction - // containing an unpredictable topic silently added by the relay on the router level. + // containing an unpredictable topic silently added by the relay's messages on the router level. // This changes the message hash on arrival to our chain. // // See: From cefeef50d97a7c7b6c6344947b31c53bcbf4532c Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 14:15:57 +0200 Subject: [PATCH 073/143] fix: make _uniqueChainMultilocationForRelay private --- tests/src/xcm/xcm.types.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index a5509afab2..e6eedf8ae7 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -186,7 +186,7 @@ export class XcmTestHelper { }; } - uniqueChainMultilocationForRelay() { + private _uniqueChainMultilocationForRelay() { return { V3: { parents: 0, @@ -552,7 +552,7 @@ export class XcmTestHelper { await usingRelayPlaygrounds(relayUrl, async (helper) => { await helper.getSudo().executeExtrinsic(relaySudoer, 'api.tx.xcmPallet.send', [ - this.uniqueChainMultilocationForRelay(), + this._uniqueChainMultilocationForRelay(), program, ]); }); @@ -608,7 +608,7 @@ export class XcmTestHelper { await usingRelayPlaygrounds(relayUrl, async (helper) => { await helper.getSudo().executeExtrinsic(relaySudoer, 'api.tx.xcmPallet.send', [ - this.uniqueChainMultilocationForRelay(), + this._uniqueChainMultilocationForRelay(), program, ]); }); From 17703e6a9d581279601cc75ee45cc594001bb2d6 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 14:27:15 +0200 Subject: [PATCH 074/143] fix: set correct shiden/astar values for our chain --- tests/src/util/playgrounds/unique.xcm.ts | 8 ++++---- tests/src/xcm/lowLevelXcmQuartz.test.ts | 13 +++++-------- tests/src/xcm/lowLevelXcmUnique.test.ts | 11 +++++------ tests/src/xcm/xcmQuartz.test.ts | 11 +++++------ tests/src/xcm/xcmUnique.test.ts | 11 +++++------ 5 files changed, 24 insertions(+), 30 deletions(-) diff --git a/tests/src/util/playgrounds/unique.xcm.ts b/tests/src/util/playgrounds/unique.xcm.ts index 12d736ae6c..a980c2c99c 100644 --- a/tests/src/util/playgrounds/unique.xcm.ts +++ b/tests/src/util/playgrounds/unique.xcm.ts @@ -240,19 +240,19 @@ export class TokensGroup extends HelperGroup { } export class AssetsGroup extends HelperGroup { - async create(signer: TSigner, assetId: number, admin: string, minimalBalance: bigint) { + async create(signer: TSigner, assetId: number | bigint, admin: string, minimalBalance: bigint) { await 
this.helper.executeExtrinsic(signer, 'api.tx.assets.create', [assetId, admin, minimalBalance], true); } - async setMetadata(signer: TSigner, assetId: number, name: string, symbol: string, decimals: number) { + async setMetadata(signer: TSigner, assetId: number | bigint, name: string, symbol: string, decimals: number) { await this.helper.executeExtrinsic(signer, 'api.tx.assets.setMetadata', [assetId, name, symbol, decimals], true); } - async mint(signer: TSigner, assetId: number, beneficiary: string, amount: bigint) { + async mint(signer: TSigner, assetId: number | bigint, beneficiary: string, amount: bigint) { await this.helper.executeExtrinsic(signer, 'api.tx.assets.mint', [assetId, beneficiary, amount], true); } - async account(assetId: string | number, address: string) { + async account(assetId: string | number | bigint, address: string) { const accountAsset = ( await this.helper.callRpc('api.query.assets.account', [assetId, address]) ).toJSON()! as any; diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 42c5716897..23139e56c9 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -222,14 +222,12 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { let alice: IKeyringPair; let randomAccount: IKeyringPair; - const QTZ_ASSET_ID_ON_SHIDEN = 1; - const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; + const QTZ_ASSET_ID_ON_SHIDEN = 18_446_744_073_709_551_633n; // The value is taken from the live Shiden + const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; // The value is taken from the live Shiden // Quartz -> Shiden const shidenInitialBalance = 1n * (10n ** SHIDEN_DECIMALS); // 1 SHD, existential deposit required to actually create the account on Shiden - const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? - - + const unitsPerSecond = 500_451_000_000_000_000_000n; // The value is taken from the live Shiden before(async () => { await usingPlaygrounds(async (helper, privateKey) => { @@ -245,7 +243,6 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { await usingShidenPlaygrounds(shidenUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production await helper.assets.create( alice, QTZ_ASSET_ID_ON_SHIDEN, @@ -256,8 +253,8 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { await helper.assets.setMetadata( alice, QTZ_ASSET_ID_ON_SHIDEN, - 'Cross chain QTZ', - 'xcQTZ', + 'Quartz', + 'QTZ', Number(QTZ_DECIMALS), ); diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index 0a8f657b05..b36b17dfb9 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -288,12 +288,12 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { let alice: IKeyringPair; let randomAccount: IKeyringPair; - const UNQ_ASSET_ID_ON_ASTAR = 1; - const UNQ_MINIMAL_BALANCE_ON_ASTAR = 1n; + const UNQ_ASSET_ID_ON_ASTAR = 18_446_744_073_709_551_631n; // The value is taken from the live Astar + const UNQ_MINIMAL_BALANCE_ON_ASTAR = 1n; // The value is taken from the live Astar // Unique -> Astar const astarInitialBalance = 1n * (10n ** ASTAR_DECIMALS); // 1 ASTR, existential deposit required to actually create the account on Astar. 
- const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? + const unitsPerSecond = 9_451_000_000_000_000_000n; // The value is taken from the live Astar before(async () => { await usingPlaygrounds(async (helper, privateKey) => { @@ -309,7 +309,6 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { await usingAstarPlaygrounds(astarUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production await helper.assets.create( alice, UNQ_ASSET_ID_ON_ASTAR, @@ -320,8 +319,8 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { await helper.assets.setMetadata( alice, UNQ_ASSET_ID_ON_ASTAR, - 'Cross chain UNQ', - 'xcUNQ', + 'Unique Network', + 'UNQ', Number(UNQ_DECIMALS), ); diff --git a/tests/src/xcm/xcmQuartz.test.ts b/tests/src/xcm/xcmQuartz.test.ts index 41b3c0aae6..92e4310e89 100644 --- a/tests/src/xcm/xcmQuartz.test.ts +++ b/tests/src/xcm/xcmQuartz.test.ts @@ -1283,12 +1283,12 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { let alice: IKeyringPair; let sender: IKeyringPair; - const QTZ_ASSET_ID_ON_SHIDEN = 1; - const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; + const QTZ_ASSET_ID_ON_SHIDEN = 18_446_744_073_709_551_633n; // The value is taken from the live Shiden + const QTZ_MINIMAL_BALANCE_ON_SHIDEN = 1n; // The value is taken from the live Shiden // Quartz -> Shiden const shidenInitialBalance = 1n * (10n ** SHIDEN_DECIMALS); // 1 SHD, existential deposit required to actually create the account on Shiden - const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? + const unitsPerSecond = 500_451_000_000_000_000_000n; // The value is taken from the live Shiden const qtzToShidenTransferred = 10n * (10n ** QTZ_DECIMALS); // 10 QTZ const qtzToShidenArrived = 9_999_999_999_088_000_000n; // 9.999 ... QTZ, Shiden takes a commision in foreign tokens @@ -1311,7 +1311,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { await usingShidenPlaygrounds(shidenUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production await helper.assets.create( alice, QTZ_ASSET_ID_ON_SHIDEN, @@ -1322,8 +1321,8 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { await helper.assets.setMetadata( alice, QTZ_ASSET_ID_ON_SHIDEN, - 'Cross chain QTZ', - 'xcQTZ', + 'Quartz', + 'QTZ', Number(QTZ_DECIMALS), ); diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index 531d4f519f..b720914581 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -1511,12 +1511,12 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { let alice: IKeyringPair; let randomAccount: IKeyringPair; - const UNQ_ASSET_ID_ON_ASTAR = 1; - const UNQ_MINIMAL_BALANCE_ON_ASTAR = 1n; + const UNQ_ASSET_ID_ON_ASTAR = 18_446_744_073_709_551_631n; // The value is taken from the live Astar + const UNQ_MINIMAL_BALANCE_ON_ASTAR = 1n; // The value is taken from the live Astar // Unique -> Astar const astarInitialBalance = 1n * (10n ** ASTAR_DECIMALS); // 1 ASTR, existential deposit required to actually create the account on Astar. 
- const unitsPerSecond = 228_000_000_000n; // This is Phala's value. What will be ours? + const unitsPerSecond = 9_451_000_000_000_000_000n; // The value is taken from the live Astar const unqToAstarTransferred = 10n * (10n ** UNQ_DECIMALS); // 10 UNQ const unqToAstarArrived = 9_999_999_999_088_000_000n; // 9.999 ... UNQ, Astar takes a commision in foreign tokens @@ -1539,7 +1539,6 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { await usingAstarPlaygrounds(astarUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { console.log('1. Create foreign asset and metadata'); - // TODO update metadata with values from production await helper.assets.create( alice, UNQ_ASSET_ID_ON_ASTAR, @@ -1550,8 +1549,8 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { await helper.assets.setMetadata( alice, UNQ_ASSET_ID_ON_ASTAR, - 'Cross chain UNQ', - 'xcUNQ', + 'Unique Network', + 'UNQ', Number(UNQ_DECIMALS), ); From 116f1453edc77ddb9a26915984382079029e9e4d Mon Sep 17 00:00:00 2001 From: Pavel Orlov <45266194+PraetorP@users.noreply.github.com> Date: Mon, 2 Oct 2023 12:36:35 +0000 Subject: [PATCH 075/143] Update tests/src/xcm/xcm.types.ts Co-authored-by: Daniel Shiposha --- tests/src/xcm/xcm.types.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index e6eedf8ae7..ea4baf402b 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -155,7 +155,7 @@ export class XcmTestHelper { switch (this._nativeRuntime) { case 'opal': // To-Do - return 10; + return 1001; case 'quartz': return QUARTZ_CHAIN; case 'unique': From 0243f65534e44747ff7303a550902b0732be410d Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 2 Oct 2023 13:00:08 +0000 Subject: [PATCH 076/143] refactor(xcm test): rename `reserveTransferUNQfrom` --- tests/src/xcm/lowLevelXcmQuartz.test.ts | 4 ++-- tests/src/xcm/lowLevelXcmUnique.test.ts | 8 ++++---- tests/src/xcm/xcm.types.ts | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 23139e56c9..6f06ed760d 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -214,7 +214,7 @@ describeXCM('[XCMLL] Integration test: Exchanging QTZ with Moonriver', () => { }); itSub('Should not accept reserve transfer of QTZ from Moonriver', async () => { - await testHelper.reserveTransferUNQfrom('moonriver', alice); + await testHelper.rejectReserveTransferUNQfrom('moonriver', alice); }); }); @@ -295,7 +295,7 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { }); itSub('Should not accept reserve transfer of QTZ from Shiden', async () => { - await testHelper.reserveTransferUNQfrom('shiden', alice); + await testHelper.rejectReserveTransferUNQfrom('shiden', alice); }); }); diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index b36b17dfb9..f66432dab1 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -87,7 +87,7 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Acala', () => { }); itSub('Should not accept reserve transfer of UNQ from Acala', async () => { - await testHelper.reserveTransferUNQfrom('acala', alice); + await testHelper.rejectReserveTransferUNQfrom('acala', alice); }); }); @@ -142,7 +142,7 @@ describeXCM('[XCMLL] Integration 
test: Exchanging tokens with Polkadex', () => { }); itSub('Should not accept reserve transfer of UNQ from Polkadex', async () => { - await testHelper.reserveTransferUNQfrom('polkadex', alice); + await testHelper.rejectReserveTransferUNQfrom('polkadex', alice); }); }); @@ -280,7 +280,7 @@ describeXCM('[XCMLL] Integration test: Exchanging UNQ with Moonbeam', () => { }); itSub('Should not accept reserve transfer of UNQ from Moonbeam', async () => { - await testHelper.reserveTransferUNQfrom('moonbeam', alice); + await testHelper.rejectReserveTransferUNQfrom('moonbeam', alice); }); }); @@ -361,7 +361,7 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { }); itSub('Should not accept reserve transfer of UNQ from Astar', async () => { - await testHelper.reserveTransferUNQfrom('astar', alice); + await testHelper.rejectReserveTransferUNQfrom('astar', alice); }); }); diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index ea4baf402b..931b9f4985 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -385,7 +385,7 @@ export class XcmTestHelper { }); } - async reserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { + async rejectReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { const networkUrl = mapToChainUrl(netwokrName); const targetPlayground = getDevPlayground(netwokrName); From 78c4f0c72874978b1f77680ff9c3dd59180e18ae Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 2 Oct 2023 13:08:54 +0000 Subject: [PATCH 077/143] fix(test xcm): typos --- tests/src/xcm/xcm.types.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/src/xcm/xcm.types.ts b/tests/src/xcm/xcm.types.ts index 931b9f4985..ae596fd4c9 100644 --- a/tests/src/xcm/xcm.types.ts +++ b/tests/src/xcm/xcm.types.ts @@ -385,9 +385,9 @@ export class XcmTestHelper { }); } - async rejectReserveTransferUNQfrom(netwokrName: keyof typeof NETWORKS, sudoer: IKeyringPair) { - const networkUrl = mapToChainUrl(netwokrName); - const targetPlayground = getDevPlayground(netwokrName); + async rejectReserveTransferUNQfrom(networkName: keyof typeof NETWORKS, sudoer: IKeyringPair) { + const networkUrl = mapToChainUrl(networkName); + const targetPlayground = getDevPlayground(networkName); await usingPlaygrounds(async (helper) => { const testAmount = 10_000n * (10n ** UNQ_DECIMALS); @@ -434,7 +434,7 @@ export class XcmTestHelper { const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgramFullId]); // Needed to bypass the call filter. const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using path asset identification`,batchCall); + await helper.fastDemocracy.executeProposal(`${networkName} try to act like a reserve location for UNQ using path asset identification`,batchCall); maliciousXcmProgramFullIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } @@ -456,7 +456,7 @@ export class XcmTestHelper { const xcmSend = helper.constructApiCall('api.tx.polkadotXcm.send', [this._runtimeVersionedMultilocation(), maliciousXcmProgramHereId]); // Needed to bypass the call filter. 
const batchCall = helper.encodeApiCall('api.tx.utility.batch', [[xcmSend]]); - await helper.fastDemocracy.executeProposal(`${netwokrName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); + await helper.fastDemocracy.executeProposal(`${networkName} try to act like a reserve location for UNQ using "here" asset identification`, batchCall); maliciousXcmProgramHereIdSent = await helper.wait.expectEvent(maxWaitBlocks, Event.XcmpQueue.XcmpMessageSent); } From b4e7cb4fb12ab34617d623032aacdcc5ffb35526 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 2 Oct 2023 13:49:09 +0000 Subject: [PATCH 078/143] fix(tests xcm): remove vendor folder --- vendor/baedeker-library | 1 - 1 file changed, 1 deletion(-) delete mode 160000 vendor/baedeker-library diff --git a/vendor/baedeker-library b/vendor/baedeker-library deleted file mode 160000 index 9f1eca0cea..0000000000 --- a/vendor/baedeker-library +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 9f1eca0cea9f50ce8486f2a4b9db65892ea12c36 From b6b1e0f0607a6801a72b0c8ff43ac64cbb023a7a Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 16:02:56 +0200 Subject: [PATCH 079/143] feat: add cli param disable-autoseal-on-tx --- node/cli/src/cli.rs | 4 ++++ node/cli/src/command.rs | 2 +- node/cli/src/service.rs | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs index e9db9fd983..54f8975fb0 100644 --- a/node/cli/src/cli.rs +++ b/node/cli/src/cli.rs @@ -84,6 +84,10 @@ pub struct Cli { #[structopt(default_value = "500", long)] pub idle_autoseal_interval: u64, + /// Disable auto-sealing blocks on new transactions in the `--dev` mode. + #[structopt(long)] + pub disable_autoseal_on_tx: bool, + /// Disable automatic hardware benchmarks. /// /// By default these benchmarks are automatically ran at startup and measure diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index da2cddf72b..64b0548da6 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -488,7 +488,7 @@ pub fn run() -> Result<()> { config.state_pruning = Some(sc_service::PruningMode::ArchiveAll); return start_node_using_chain_runtime! { - start_dev_node(config, autoseal_interval).map_err(Into::into) + start_dev_node(config, autoseal_interval, cli.disable_autoseal_on_tx).map_err(Into::into) }; }; diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 02071640e3..63183991ba 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -886,6 +886,7 @@ pub struct OtherPartial { pub fn start_dev_node( config: Configuration, autoseal_interval: Duration, + disable_autoseal_on_tx: bool, ) -> sc_service::error::Result where Runtime: RuntimeInstance + Send + Sync + 'static, @@ -980,6 +981,7 @@ where .pool() .validated_pool() .import_notification_stream() + .filter(move |_| futures::future::ready(!disable_autoseal_on_tx)) .map(|_| EngineCommand::SealNewBlock { create_empty: true, finalize: false, // todo:collator finalize true From f78c9266d8456b563f4ea3c20c89763bc342086a Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 2 Oct 2023 14:16:17 +0000 Subject: [PATCH 080/143] Revert "fix(tests xcm): remove vendor folder" This reverts commit b4e7cb4fb12ab34617d623032aacdcc5ffb35526. 
--- vendor/baedeker-library | 1 + 1 file changed, 1 insertion(+) create mode 160000 vendor/baedeker-library diff --git a/vendor/baedeker-library b/vendor/baedeker-library new file mode 160000 index 0000000000..9f1eca0cea --- /dev/null +++ b/vendor/baedeker-library @@ -0,0 +1 @@ +Subproject commit 9f1eca0cea9f50ce8486f2a4b9db65892ea12c36 From 31072020f43ea32bf87136a27655ebe7b721fb0a Mon Sep 17 00:00:00 2001 From: PraetorP Date: Mon, 2 Oct 2023 14:17:01 +0000 Subject: [PATCH 081/143] fix(tests xcm): remove vendor folder --- vendor/baedeker-library | 1 - 1 file changed, 1 deletion(-) delete mode 160000 vendor/baedeker-library diff --git a/vendor/baedeker-library b/vendor/baedeker-library deleted file mode 160000 index 9f1eca0cea..0000000000 --- a/vendor/baedeker-library +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 9f1eca0cea9f50ce8486f2a4b9db65892ea12c36 From 184b30a86bc89ae1a2c7e1511a4a86d227a0619d Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 16:36:08 +0200 Subject: [PATCH 082/143] feat: finalization can be enabled in dev mode --- node/cli/src/cli.rs | 8 +++++++- node/cli/src/command.rs | 5 +---- node/cli/src/service.rs | 31 +++++++++++++++++++++++++------ 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs index 54f8975fb0..3a9b612cb1 100644 --- a/node/cli/src/cli.rs +++ b/node/cli/src/cli.rs @@ -80,7 +80,7 @@ pub struct Cli { /// an empty block will be sealed automatically /// after the `--idle-autoseal-interval` milliseconds. /// - /// The default interval is 500 milliseconds + /// The default interval is 500 milliseconds. #[structopt(default_value = "500", long)] pub idle_autoseal_interval: u64, @@ -88,6 +88,12 @@ pub struct Cli { #[structopt(long)] pub disable_autoseal_on_tx: bool, + /// Finalization delay (in seconds) of auto-sealed blocks in the `--dev` mode. + /// + /// Disabled by default. + #[structopt(long)] + pub autoseal_finalization_delay: Option, + /// Disable automatic hardware benchmarks. /// /// By default these benchmarks are automatically ran at startup and measure diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 64b0548da6..3db301a27a 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -62,7 +62,6 @@ use sc_cli::{ use sc_service::config::{BasePath, PrometheusConfig}; use sp_core::hexdisplay::HexDisplay; use sp_runtime::traits::{AccountIdConversion, Block as BlockT}; -use std::{time::Duration}; use up_common::types::opaque::{Block, RuntimeId}; @@ -481,14 +480,12 @@ pub fn run() -> Result<()> { if is_dev_service { info!("Running Dev service"); - let autoseal_interval = Duration::from_millis(cli.idle_autoseal_interval); - let mut config = config; config.state_pruning = Some(sc_service::PruningMode::ArchiveAll); return start_node_using_chain_runtime! 
{ - start_dev_node(config, autoseal_interval, cli.disable_autoseal_on_tx).map_err(Into::into) + start_dev_node(config, cli.idle_autoseal_interval, cli.autoseal_finalization_delay, cli.disable_autoseal_on_tx).map_err(Into::into) }; }; diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 63183991ba..b50c6e2fcb 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -169,9 +169,9 @@ pub struct AutosealInterval { } impl AutosealInterval { - pub fn new(config: &Configuration, interval: Duration) -> Self { + pub fn new(config: &Configuration, interval: u64) -> Self { let _tokio_runtime = config.tokio_handle.enter(); - let interval = tokio::time::interval(interval); + let interval = tokio::time::interval(Duration::from_millis(interval)); Self { interval } } @@ -885,7 +885,8 @@ pub struct OtherPartial { /// the parachain inherent pub fn start_dev_node( config: Configuration, - autoseal_interval: Duration, + autoseal_interval: u64, + autoseal_finalize_delay: Option, disable_autoseal_on_tx: bool, ) -> sc_service::error::Result where @@ -913,7 +914,10 @@ where + sp_consensus_aura::AuraApi, ExecutorDispatch: NativeExecutionDispatch + 'static, { - use sc_consensus_manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}; + use sc_consensus_manual_seal::{ + run_manual_seal, run_delayed_finalize, EngineCommand, ManualSealParams, + DelayedFinalizeParams, + }; use fc_consensus::FrontierBlockImport; let sc_service::PartialComponents { @@ -984,18 +988,19 @@ where .filter(move |_| futures::future::ready(!disable_autoseal_on_tx)) .map(|_| EngineCommand::SealNewBlock { create_empty: true, - finalize: false, // todo:collator finalize true + finalize: false, parent_hash: None, sender: None, }), ); let autoseal_interval = Box::pin(AutosealInterval::new(&config, autoseal_interval)); + let idle_commands_stream: Box< dyn Stream> + Send + Sync + Unpin, > = Box::new(autoseal_interval.map(|_| EngineCommand::SealNewBlock { create_empty: true, - finalize: false, // todo:collator finalize true + finalize: false, parent_hash: None, sender: None, })); @@ -1005,6 +1010,20 @@ where let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let client_set_aside_for_cidp = client.clone(); + if let Some(delay_sec) = autoseal_finalize_delay { + let spawn_handle = task_manager.spawn_handle(); + + task_manager.spawn_essential_handle().spawn_blocking( + "finalization_task", + Some("block-authoring"), + run_delayed_finalize(DelayedFinalizeParams { + client: client.clone(), + delay_sec, + spawn_handle, + }), + ); + } + task_manager.spawn_essential_handle().spawn_blocking( "authorship_task", Some("block-authoring"), From 57b7f8c763eae893b12370e9984e58db468ac6de Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 17:02:27 +0200 Subject: [PATCH 083/143] fix: slightly fix broken pov-estimate --- runtime/common/runtime_apis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index a3d9bae08f..f38234968c 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -692,7 +692,7 @@ macro_rules! 
impl_common_runtime_apis { { use codec::Decode; - let uxt_decode = <::Extrinsic as Decode>::decode(&mut &uxt) + let uxt_decode = <::Extrinsic as Decode>::decode(&mut &*uxt) .map_err(|_| DispatchError::Other("failed to decode the extrinsic")); let uxt = match uxt_decode { From e105d72c172ccd5d3e25e0b23a293a1d74068dcf Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 17:46:01 +0200 Subject: [PATCH 084/143] fix: use config.ts for endpoints --- tests/src/migrations/942057-appPromotion/lockedToFreeze.ts | 3 ++- tests/src/migrations/correctStateAfterMaintenance.ts | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/src/migrations/942057-appPromotion/lockedToFreeze.ts b/tests/src/migrations/942057-appPromotion/lockedToFreeze.ts index 0f92e497c5..c568b2ff99 100644 --- a/tests/src/migrations/942057-appPromotion/lockedToFreeze.ts +++ b/tests/src/migrations/942057-appPromotion/lockedToFreeze.ts @@ -4,9 +4,10 @@ import {usingPlaygrounds} from '../../util'; import path, {dirname} from 'path'; import {isInteger, parse} from 'lossless-json'; import {fileURLToPath} from 'url'; +import config from '../../config'; -const WS_ENDPOINT = 'ws://localhost:9944'; +const WS_ENDPOINT = config.substrateUrl; const DONOR_SEED = '//Alice'; const UPDATE_IF_VERSION = 942057; diff --git a/tests/src/migrations/correctStateAfterMaintenance.ts b/tests/src/migrations/correctStateAfterMaintenance.ts index 4ce5126874..b313afef70 100644 --- a/tests/src/migrations/correctStateAfterMaintenance.ts +++ b/tests/src/migrations/correctStateAfterMaintenance.ts @@ -1,8 +1,9 @@ +import config from '../config'; import {usingPlaygrounds} from '../util'; -const WS_ENDPOINT = 'ws://127.0.0.1:9944'; +const WS_ENDPOINT = config.substrateUrl; const DONOR_SEED = '//Alice'; export const main = async(options: { wsEndpoint: string; donorSeed: string } = { From 5ef5e6b4aa6d4703d00a8d592c498ce01a4bb9d3 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 17:47:04 +0200 Subject: [PATCH 085/143] fix: make perf test seq and remove unused functions --- .../src/{performance.test.ts => performance.seq.test.ts} | 9 --------- 1 file changed, 9 deletions(-) rename tests/src/{performance.test.ts => performance.seq.test.ts} (96%) diff --git a/tests/src/performance.test.ts b/tests/src/performance.seq.test.ts similarity index 96% rename from tests/src/performance.test.ts rename to tests/src/performance.seq.test.ts index 1322bfd597..e049e217d9 100644 --- a/tests/src/performance.test.ts +++ b/tests/src/performance.seq.test.ts @@ -142,11 +142,6 @@ const tryMintExplicit = async (helper: UniqueHelper, signer: IKeyringPair, token return tokensCount; }; - -function sizeOfByteProperty(prop: IProperty) { - return sizeOfEncodedBytes(prop.key) + sizeOfEncodedBytes(prop.value!); -} - function sizeOfProperty(prop: IProperty) { return sizeOfEncodedStr(prop.key) + sizeOfEncodedStr(prop.value!); } @@ -169,7 +164,3 @@ function sizeOfEncodedStr(v: string) { const encoded = UTF8_ENCODER.encode(v); return sizeOfInt(encoded.length) + encoded.length; } - -function sizeOfEncodedBytes(bytes: Uint8Array | string) { - return sizeOfInt(bytes.length) + bytes.length; -} From eebd0e2c517b1f52e8cf673c0e98136ee9744918 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 18:04:09 +0200 Subject: [PATCH 086/143] fix: use OptionQuery for TokenProperties --- pallets/balances-adapter/src/common.rs | 14 ++++----- pallets/common/src/lib.rs | 19 ++++++------ pallets/fungible/src/common.rs | 14 ++++----- 
pallets/nonfungible/src/common.rs | 19 ++++++------ pallets/nonfungible/src/erc.rs | 3 +- pallets/nonfungible/src/lib.rs | 42 ++++---------------------- pallets/refungible/src/common.rs | 19 ++++++------ pallets/refungible/src/erc.rs | 3 +- pallets/refungible/src/lib.rs | 38 ++++------------------- 9 files changed, 57 insertions(+), 114 deletions(-) diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index 0dd4fd98bb..efda782c0f 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -172,18 +172,16 @@ impl CommonCollectionOperations for NativeFungibleHandle { fail!(>::UnsupportedOperation); } - fn get_token_properties_map(&self, _token_id: TokenId) -> up_data_structs::TokenProperties { - // No token properties are defined on fungibles - up_data_structs::TokenProperties::new() - } - - fn set_token_properties_map(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { + fn get_token_properties_raw( + &self, + _token_id: TokenId, + ) -> Option { // No token properties are defined on fungibles + None } - fn properties_exist(&self, _token: TokenId) -> bool { + fn set_token_properties_raw(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { // No token properties are defined on fungibles - false } fn set_token_property_permissions( diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index 53ec3d8452..fbbd29048a 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -2098,18 +2098,13 @@ pub trait CommonCollectionOperations { /// Get token properties raw map. /// /// * `token_id` - The token which properties are needed. - fn get_token_properties_map(&self, token_id: TokenId) -> TokenProperties; + fn get_token_properties_raw(&self, token_id: TokenId) -> Option; /// Set token properties raw map. /// /// * `token_id` - The token for which the properties are being set. /// * `map` - The raw map containing the token's properties. - fn set_token_properties_map(&self, token_id: TokenId, map: TokenProperties); - - /// Whether the given token has properties. - /// - /// * `token_id` - The token in question. - fn properties_exist(&self, token: TokenId) -> bool; + fn set_token_properties_raw(&self, token_id: TokenId, map: TokenProperties); /// Set token property permissions. 
/// @@ -2590,7 +2585,7 @@ impl< >::deposit_log(log); self.collection - .set_token_properties_map(token_id, stored_properties.into_inner()); + .set_token_properties_raw(token_id, stored_properties.into_inner()); } Ok(()) @@ -2624,7 +2619,7 @@ where true }, get_properties: |token_id| { - debug_assert!(!collection.properties_exist(token_id)); + debug_assert!(collection.get_token_properties_raw(token_id).is_none()); TokenProperties::new() }, _phantom: PhantomData, @@ -2686,7 +2681,11 @@ where is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), property_permissions: LazyValue::new(|| >::property_permissions(collection.id)), check_token_exist: |token_id| collection.token_exists(token_id), - get_properties: |token_id| collection.get_token_properties_map(token_id), + get_properties: |token_id| { + collection + .get_token_properties_raw(token_id) + .unwrap_or_default() + }, _phantom: PhantomData, } } diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index d6cf683914..e459d52de6 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -364,18 +364,16 @@ impl CommonCollectionOperations for FungibleHandle { fail!(>::SettingPropertiesNotAllowed) } - fn get_token_properties_map(&self, _token_id: TokenId) -> up_data_structs::TokenProperties { - // No token properties are defined on fungibles - up_data_structs::TokenProperties::new() - } - - fn set_token_properties_map(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { + fn get_token_properties_raw( + &self, + _token_id: TokenId, + ) -> Option { // No token properties are defined on fungibles + None } - fn properties_exist(&self, _token: TokenId) -> bool { + fn set_token_properties_raw(&self, _token_id: TokenId, _map: up_data_structs::TokenProperties) { // No token properties are defined on fungibles - false } fn check_nesting( diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index c50f79ce02..4854cdf9bd 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -265,12 +265,15 @@ impl CommonCollectionOperations for NonfungibleHandle { ) } - fn get_token_properties_map(&self, token_id: TokenId) -> up_data_structs::TokenProperties { + fn get_token_properties_raw( + &self, + token_id: TokenId, + ) -> Option { >::get((self.id, token_id)) } - fn set_token_properties_map(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { - >::set((self.id, token_id), map) + fn set_token_properties_raw(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { + >::insert((self.id, token_id), map) } fn set_token_property_permissions( @@ -287,10 +290,6 @@ impl CommonCollectionOperations for NonfungibleHandle { ) } - fn properties_exist(&self, token: TokenId) -> bool { - >::contains_key((self.id, token)) - } - fn burn_item( &self, sender: T::CrossAccountId, @@ -482,13 +481,15 @@ impl CommonCollectionOperations for NonfungibleHandle { } fn token_property(&self, token_id: TokenId, key: &PropertyKey) -> Option { - >::token_properties((self.id, token_id)) + >::token_properties((self.id, token_id))? 
.get(key) .cloned() } fn token_properties(&self, token_id: TokenId, keys: Option>) -> Vec { - let properties = >::token_properties((self.id, token_id)); + let Some(properties) = >::token_properties((self.id, token_id)) else { + return vec![]; + }; keys.map(|keys| { keys.into_iter() diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index dc46fe926f..41ed9792d3 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -272,7 +272,8 @@ impl NonfungibleHandle { .try_into() .map_err(|_| "key too long")?; - let props = >::get((self.id, token_id)); + let props = + >::get((self.id, token_id)).ok_or("Token properties not found")?; let prop = props.get(&key).ok_or("key not found")?; Ok(prop.to_vec().into()) diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index 019ee2fa71..02ad2165f0 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -102,8 +102,8 @@ use frame_support::{ use up_data_structs::{ AccessMode, CollectionId, CustomDataLimit, TokenId, CreateCollectionData, CreateNftExData, mapping::TokenAddressMapping, budget::Budget, Property, PropertyKey, PropertyValue, - PropertyKeyPermission, PropertyScope, TrySetProperty, TokenChild, AuxPropertyValue, - PropertiesPermissionMap, TokenProperties as TokenPropertiesT, + PropertyKeyPermission, PropertyScope, TokenChild, AuxPropertyValue, PropertiesPermissionMap, + TokenProperties as TokenPropertiesT, }; use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_common::{ @@ -201,7 +201,7 @@ pub mod pallet { pub type TokenProperties = StorageNMap< Key = (Key, Key), Value = TokenPropertiesT, - QueryKind = ValueQuery, + QueryKind = OptionQuery, >; /// Custom data of a token that is serialized to bytes, @@ -342,38 +342,6 @@ impl Pallet { >::contains_key((collection.id, token)) } - /// Set the token property with the scope. - /// - /// - `property`: Contains key-value pair. - pub fn set_scoped_token_property( - collection_id: CollectionId, - token_id: TokenId, - scope: PropertyScope, - property: Property, - ) -> DispatchResult { - TokenProperties::::try_mutate((collection_id, token_id), |properties| { - properties.try_scoped_set(scope, property.key, property.value) - }) - .map_err(>::from)?; - - Ok(()) - } - - /// Batch operation to set multiple properties with the same scope. - pub fn set_scoped_token_properties( - collection_id: CollectionId, - token_id: TokenId, - scope: PropertyScope, - properties: impl Iterator, - ) -> DispatchResult { - TokenProperties::::try_mutate((collection_id, token_id), |stored_properties| { - stored_properties.try_scoped_set_from_iter(scope, properties) - }) - .map_err(>::from)?; - - Ok(()) - } - /// Add or edit auxiliary data for the property. /// /// - `f`: function that adds or edits auxiliary data. 
@@ -1394,7 +1362,9 @@ impl Pallet { pub fn repair_item(collection: &NonfungibleHandle, token: TokenId) -> DispatchResult { >::mutate((collection.id, token), |properties| { - properties.recompute_consumed_space(); + if let Some(properties) = properties { + properties.recompute_consumed_space(); + } }); Ok(()) diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 41f9dbf4d4..0d64f8deb8 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -435,16 +435,15 @@ impl CommonCollectionOperations for RefungibleHandle { ) } - fn get_token_properties_map(&self, token_id: TokenId) -> up_data_structs::TokenProperties { + fn get_token_properties_raw( + &self, + token_id: TokenId, + ) -> Option { >::get((self.id, token_id)) } - fn set_token_properties_map(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { - >::set((self.id, token_id), map) - } - - fn properties_exist(&self, token: TokenId) -> bool { - >::contains_key((self.id, token)) + fn set_token_properties_raw(&self, token_id: TokenId, map: up_data_structs::TokenProperties) { + >::insert((self.id, token_id), map) } fn check_nesting( @@ -514,13 +513,15 @@ impl CommonCollectionOperations for RefungibleHandle { } fn token_property(&self, token_id: TokenId, key: &PropertyKey) -> Option { - >::token_properties((self.id, token_id)) + >::token_properties((self.id, token_id))? .get(key) .cloned() } fn token_properties(&self, token_id: TokenId, keys: Option>) -> Vec { - let properties = >::token_properties((self.id, token_id)); + let Some(properties) = >::token_properties((self.id, token_id)) else { + return vec![]; + }; keys.map(|keys| { keys.into_iter() diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 3a706cf8ef..05ac3c6c62 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -283,7 +283,8 @@ impl RefungibleHandle { .try_into() .map_err(|_| "key too long")?; - let props = >::get((self.id, token_id)); + let props = + >::get((self.id, token_id)).ok_or("Token properties not found")?; let prop = props.get(&key).ok_or("key not found")?; Ok(prop.to_vec().into()) diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 5482eb65b7..e9f4f9575e 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -106,8 +106,8 @@ use sp_std::{vec::Vec, vec, collections::btree_map::BTreeMap}; use up_data_structs::{ AccessMode, budget::Budget, CollectionId, CreateCollectionData, mapping::TokenAddressMapping, MAX_REFUNGIBLE_PIECES, Property, PropertyKey, PropertyKeyPermission, PropertyScope, - PropertyValue, TokenId, TrySetProperty, PropertiesPermissionMap, - CreateRefungibleExMultipleOwners, TokenOwnerError, TokenProperties as TokenPropertiesT, + PropertyValue, TokenId, PropertiesPermissionMap, CreateRefungibleExMultipleOwners, + TokenOwnerError, TokenProperties as TokenPropertiesT, }; pub use pallet::*; @@ -175,7 +175,7 @@ pub mod pallet { pub type TokenProperties = StorageNMap< Key = (Key, Key), Value = TokenPropertiesT, - QueryKind = ValueQuery, + QueryKind = OptionQuery, >; /// Total amount of pieces for token @@ -293,34 +293,6 @@ impl Pallet { pub fn token_exists(collection: &RefungibleHandle, token: TokenId) -> bool { >::contains_key((collection.id, token)) } - - pub fn set_scoped_token_property( - collection_id: CollectionId, - token_id: TokenId, - scope: PropertyScope, - property: Property, - ) -> DispatchResult { - TokenProperties::::try_mutate((collection_id, token_id), |properties| 
{ - properties.try_scoped_set(scope, property.key, property.value) - }) - .map_err(>::from)?; - - Ok(()) - } - - pub fn set_scoped_token_properties( - collection_id: CollectionId, - token_id: TokenId, - scope: PropertyScope, - properties: impl Iterator, - ) -> DispatchResult { - TokenProperties::::try_mutate((collection_id, token_id), |stored_properties| { - stored_properties.try_scoped_set_from_iter(scope, properties) - }) - .map_err(>::from)?; - - Ok(()) - } } // unchecked calls skips any permission checks @@ -1426,7 +1398,9 @@ impl Pallet { pub fn repair_item(collection: &RefungibleHandle, token: TokenId) -> DispatchResult { >::mutate((collection.id, token), |properties| { - properties.recompute_consumed_space(); + if let Some(properties) = properties { + properties.recompute_consumed_space(); + } }); Ok(()) From 6b27551b8094f5dbd9cf5f9f1fbf3ce04e0cc7fc Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 18:32:55 +0200 Subject: [PATCH 087/143] fix: unit tests --- runtime/tests/src/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/tests/src/tests.rs b/runtime/tests/src/tests.rs index db1d85fd83..73862f28aa 100644 --- a/runtime/tests/src/tests.rs +++ b/runtime/tests/src/tests.rs @@ -168,6 +168,7 @@ fn get_collection_properties(collection_id: CollectionId) -> Vec { fn get_token_properties(collection_id: CollectionId, token_id: TokenId) -> Vec { >::token_properties((collection_id, token_id)) + .unwrap_or_default() .into_iter() .map(|(key, value)| Property { key, value }) .collect() From 630fc898d4e48c8dabb22e1203af70caa4f3f3ae Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 2 Oct 2023 18:39:52 +0200 Subject: [PATCH 088/143] fix: use lowercase letters in EVM errors --- pallets/balances-adapter/src/erc.rs | 2 +- pallets/nonfungible/src/erc.rs | 18 +++++++++--------- pallets/refungible/src/erc.rs | 18 +++++++++--------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/pallets/balances-adapter/src/erc.rs b/pallets/balances-adapter/src/erc.rs index 03fa3c3e46..b58de1e747 100644 --- a/pallets/balances-adapter/src/erc.rs +++ b/pallets/balances-adapter/src/erc.rs @@ -25,7 +25,7 @@ impl NativeFungibleHandle { } fn approve(&mut self, _caller: Caller, _spender: Address, _amount: U256) -> Result { - Err("Approve not supported".into()) + Err("approve not supported".into()) } fn balance_of(&self, owner: Address) -> Result { diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index 41ed9792d3..5656474751 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -273,7 +273,7 @@ impl NonfungibleHandle { .map_err(|_| "key too long")?; let props = - >::get((self.id, token_id)).ok_or("Token properties not found")?; + >::get((self.id, token_id)).ok_or("token properties not found")?; let prop = props.get(&key).ok_or("key not found")?; Ok(prop.to_vec().into()) @@ -367,7 +367,7 @@ where .transpose() .map_err(|e| { Error::Revert(alloc::format!( - "Can not convert value \"baseURI\" to string with error \"{e}\"" + "can not convert value \"baseURI\" to string with error \"{e}\"" )) })?; @@ -658,7 +658,7 @@ impl NonfungibleHandle { let key = key::url(); let permission = get_token_permission::(self.id, &key)?; if !permission.collection_admin { - return Err("Operation is not allowed".into()); + return Err("operation is not allowed".into()); } let caller = T::CrossAccountId::from_eth(caller); @@ -685,7 +685,7 @@ impl NonfungibleHandle { .try_into() .map_err(|_| "token uri is too long")?, }) - .map_err(|e| 
Error::Revert(alloc::format!("Can't add property: {e:?}")))?; + .map_err(|e| Error::Revert(alloc::format!("can't add property: {e:?}")))?; >::create_item( self, @@ -708,12 +708,12 @@ fn get_token_property( ) -> Result { collection.consume_store_reads(1)?; let properties = >::try_get((collection.id, token_id)) - .map_err(|_| Error::Revert("Token properties not found".into()))?; + .map_err(|_| Error::Revert("token properties not found".into()))?; if let Some(property) = properties.get(key) { return Ok(String::from_utf8_lossy(property).into()); } - Err("Property tokenURI not found".into()) + Err("property tokenURI not found".into()) } fn get_token_permission( @@ -721,13 +721,13 @@ fn get_token_permission( key: &PropertyKey, ) -> Result { let token_property_permissions = CollectionPropertyPermissions::::try_get(collection_id) - .map_err(|_| Error::Revert("No permissions for collection".into()))?; + .map_err(|_| Error::Revert("no permissions for collection".into()))?; let a = token_property_permissions .get(key) .map(Clone::clone) .ok_or_else(|| { let key = String::from_utf8(key.clone().into_inner()).unwrap_or_default(); - Error::Revert(alloc::format!("No permission for key {key}")) + Error::Revert(alloc::format!("no permission for key {key}")) })?; Ok(a) } @@ -1058,7 +1058,7 @@ where .try_into() .map_err(|_| "token uri is too long")?, }) - .map_err(|e| Error::Revert(alloc::format!("Can't add property: {e:?}")))?; + .map_err(|e| Error::Revert(alloc::format!("can't add property: {e:?}")))?; data.push(CreateItemData:: { properties, diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 05ac3c6c62..4323ead058 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -284,7 +284,7 @@ impl RefungibleHandle { .map_err(|_| "key too long")?; let props = - >::get((self.id, token_id)).ok_or("Token properties not found")?; + >::get((self.id, token_id)).ok_or("token properties not found")?; let prop = props.get(&key).ok_or("key not found")?; Ok(prop.to_vec().into()) @@ -372,7 +372,7 @@ where .transpose() .map_err(|e| { Error::Revert(alloc::format!( - "Can not convert value \"baseURI\" to string with error \"{e}\"" + "can not convert value \"baseURI\" to string with error \"{e}\"" )) })?; @@ -697,7 +697,7 @@ impl RefungibleHandle { let key = key::url(); let permission = get_token_permission::(self.id, &key)?; if !permission.collection_admin { - return Err("Operation is not allowed".into()); + return Err("operation is not allowed".into()); } let caller = T::CrossAccountId::from_eth(caller); @@ -724,7 +724,7 @@ impl RefungibleHandle { .try_into() .map_err(|_| "token uri is too long")?, }) - .map_err(|e| Error::Revert(alloc::format!("Can't add property: {e:?}")))?; + .map_err(|e| Error::Revert(alloc::format!("can't add property: {e:?}")))?; let users = [(to, 1)] .into_iter() @@ -749,12 +749,12 @@ fn get_token_property( ) -> Result { collection.consume_store_reads(1)?; let properties = >::try_get((collection.id, token_id)) - .map_err(|_| Error::Revert("Token properties not found".into()))?; + .map_err(|_| Error::Revert("token properties not found".into()))?; if let Some(property) = properties.get(key) { return Ok(String::from_utf8_lossy(property).into()); } - Err("Property tokenURI not found".into()) + Err("property tokenURI not found".into()) } fn get_token_permission( @@ -762,13 +762,13 @@ fn get_token_permission( key: &PropertyKey, ) -> Result { let token_property_permissions = CollectionPropertyPermissions::::try_get(collection_id) - .map_err(|_| 
Error::Revert("No permissions for collection".into()))?; + .map_err(|_| Error::Revert("no permissions for collection".into()))?; let a = token_property_permissions .get(key) .map(Clone::clone) .ok_or_else(|| { let key = String::from_utf8(key.clone().into_inner()).unwrap_or_default(); - Error::Revert(alloc::format!("No permission for key {key}")) + Error::Revert(alloc::format!("no permission for key {key}")) })?; Ok(a) } @@ -1133,7 +1133,7 @@ where .try_into() .map_err(|_| "token uri is too long")?, }) - .map_err(|e| Error::Revert(alloc::format!("Can't add property: {e:?}")))?; + .map_err(|e| Error::Revert(alloc::format!("can't add property: {e:?}")))?; let create_item_data = CreateItemData:: { users: users.clone(), From 4161c8ea6c50bd0e9b9dfabd8f44244298341970 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:06:30 +0200 Subject: [PATCH 089/143] refactor: drop legacy unique scheduler --- pallets/scheduler-v2/Cargo.toml | 45 - pallets/scheduler-v2/src/benchmarking.rs | 374 ------ pallets/scheduler-v2/src/lib.rs | 1338 ---------------------- pallets/scheduler-v2/src/mock.rs | 292 ----- pallets/scheduler-v2/src/tests.rs | 901 --------------- pallets/scheduler-v2/src/weights.rs | 234 ---- runtime/opal/Cargo.toml | 1 - test-pallets/utils/Cargo.toml | 3 - test-pallets/utils/src/lib.rs | 1 - 9 files changed, 3189 deletions(-) delete mode 100644 pallets/scheduler-v2/Cargo.toml delete mode 100644 pallets/scheduler-v2/src/benchmarking.rs delete mode 100644 pallets/scheduler-v2/src/lib.rs delete mode 100644 pallets/scheduler-v2/src/mock.rs delete mode 100644 pallets/scheduler-v2/src/tests.rs delete mode 100644 pallets/scheduler-v2/src/weights.rs diff --git a/pallets/scheduler-v2/Cargo.toml b/pallets/scheduler-v2/Cargo.toml deleted file mode 100644 index 09e30ef1f7..0000000000 --- a/pallets/scheduler-v2/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -authors = ["Unique Network "] -description = "Unique Scheduler pallet" -edition = "2021" -homepage = "https://unique.network" -license = "GPLv3" -name = "pallet-unique-scheduler-v2" -readme = "README.md" -repository = "https://github.com/UniqueNetwork/unique-chain" -version = "0.1.0" - -[dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - -frame-benchmarking = { workspace = true, optional = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -log = { workspace = true } -scale-info = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } -sp-std = { workspace = true } - -[dev-dependencies] -pallet-preimage = { workspace = true } -substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } - -[features] -default = ["std"] -runtime-benchmarks = ["frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/scheduler-v2/src/benchmarking.rs b/pallets/scheduler-v2/src/benchmarking.rs deleted file mode 100644 index 6ef47aaf7a..0000000000 --- a/pallets/scheduler-v2/src/benchmarking.rs +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. 
-// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -// Original license: -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Scheduler pallet benchmarking. - -use super::*; -use frame_benchmarking::{account, benchmarks}; -use frame_support::{ - ensure, - traits::{schedule::Priority, PreimageRecipient}, -}; -use frame_system::RawOrigin; -use sp_std::{prelude::*, vec}; -use sp_io::hashing::blake2_256; - -use crate::{Pallet as Scheduler, ScheduledCall, EncodedCall}; -use frame_system::Call as SystemCall; - -const SEED: u32 = 0; - -const BLOCK_NUMBER: u32 = 2; - -/// Add `n` items to the schedule. -/// -/// For `resolved`: -/// - ` -/// - `None`: aborted (hash without preimage) -/// - `Some(true)`: hash resolves into call if possible, plain call otherwise -/// - `Some(false)`: plain call -fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { - let t = DispatchTime::At(when); - let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); - for i in 0..n { - let call = make_call::(None); - let period = Some(((i + 100).into(), 100)); - let name = u32_to_name(i); - Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; - } - ensure!( - Agenda::::get(when).agenda.len() == n as usize, - "didn't fill schedule" - ); - Ok(()) -} - -/// Generate a name for a scheduled task from an unsigned integer. -fn u32_to_name(i: u32) -> TaskName { - i.using_encoded(blake2_256) -} - -/// A utility for creating simple scheduled tasks. -/// -/// # Arguments -/// * `periodic` - makes the task periodic. -/// Sets the task's period and repetition count to `100`. -/// * `named` - gives a name to the task: `u32_to_name(0)`. -/// * `signed` - determines the origin of the task. -/// If true, it will have the Signed origin. Otherwise it will have the Root origin. -/// See [`make_origin`] for details. -/// * maybe_lookup_len - sets optional lookup length. It is used to benchmark task fetching from the `Preimages` store. -/// * priority - the task's priority. 
-fn make_task( - periodic: bool, - named: bool, - signed: bool, - maybe_lookup_len: Option, - priority: Priority, -) -> ScheduledOf { - let call = make_call::(maybe_lookup_len); - let maybe_periodic = match periodic { - true => Some((100u32.into(), 100)), - false => None, - }; - let maybe_id = match named { - true => Some(u32_to_name(0)), - false => None, - }; - let origin = make_origin::(signed); - Scheduled { - maybe_id, - priority, - call, - maybe_periodic, - origin, - _phantom: PhantomData, - } -} - -/// Creates a `SystemCall::remark` scheduled call with a given `len` in bytes. -/// Returns `None` if the call is too large to encode. -fn bounded(len: u32) -> Option> { - let call = <::RuntimeCall>::from(SystemCall::remark { - remark: vec![0; len as usize], - }); - ScheduledCall::new(call).ok() -} - -/// Creates a scheduled call and maximizes its size. -/// -/// If the `maybe_lookup_len` is not supplied, the task will create the maximal `Inline` scheduled call. -/// -/// Otherwise, the function will take the length value from the `maybe_lookup_len` -/// and find a minimal length value that ensures that the scheduled call will require a Preimage lookup. -fn make_call(maybe_lookup_len: Option) -> ScheduledCall { - let bound = EncodedCall::bound() as u32; - let mut len = match maybe_lookup_len { - Some(len) => { - len.clamp( - bound, - >::MaxSize::get() - 2, - ) - 3 - } - None => bound.saturating_sub(4), - }; - - loop { - let c = match bounded::(len) { - Some(x) => x, - None => { - len -= 1; - continue; - } - }; - if c.lookup_needed() == maybe_lookup_len.is_some() { - break c; - } - if maybe_lookup_len.is_some() { - len += 1; - } else if len > 0 { - len -= 1; - } else { - break c; - } - } -} - -/// Creates an origin for a scheduled call. -/// -/// If `signed` is true, it creates the Signed origin from a default account `account("origin", 0, SEED)`. -/// Otherwise, it creates the Root origin. -fn make_origin(signed: bool) -> ::PalletsOrigin { - match signed { - true => frame_system::RawOrigin::Signed(account("origin", 0, SEED)).into(), - false => frame_system::RawOrigin::Root.into(), - } -} - -/// Creates a dummy `WeightCounter` with the maximum possible weight limit. -fn dummy_counter() -> WeightCounter { - WeightCounter { - used: Weight::zero(), - limit: Weight::MAX, - } -} - -benchmarks! { - // `service_agendas` when no work is done. - // (multiple agendas - scheduled tasks in several blocks) - service_agendas_base { - let now = T::BlockNumber::from(BLOCK_NUMBER); - IncompleteSince::::put(now - One::one()); - }: { - Scheduler::::service_agendas(&mut dummy_counter(), now, 0); - } verify { - assert_eq!(IncompleteSince::::get(), Some(now - One::one())); - } - - // `service_agenda` when no work is done. - // (only one agenda - scheduled tasks in a single block) - service_agenda_base { - let now = BLOCK_NUMBER.into(); - let s in 0 .. T::MaxScheduledPerBlock::get(); - fill_schedule::(now, s)?; - let mut executed = 0; - }: { - Scheduler::::service_agenda(&mut dummy_counter(), &mut executed, now, now, 0); - } verify { - assert_eq!(executed, 0); - } - - // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_base { - let now = BLOCK_NUMBER.into(); - let task = make_task::(false, false, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. 
- let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - //assert_eq!(result, Ok(())); - } - - // TODO uncomment if we will use the Preimages - // // `service_task` when the task is a non-periodic, non-named, fetched call (with a known - // // preimage length) and which is not dispatched (e.g. due to being overweight). - // service_task_fetched { - // let s in (EncodedCall::bound() as u32) .. (>::MaxSize::get()); - // let now = BLOCK_NUMBER.into(); - // let task = make_task::(false, false, false, Some(s), 0); - // // prevent any tasks from actually being executed as we only want the surrounding weight. - // let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; - // }: { - // let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - // } verify { - // } - - // `service_task` when the task is a non-periodic, named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_named { - let now = BLOCK_NUMBER.into(); - let task = make_task::(false, true, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - } - - // `service_task` when the task is a periodic, non-named, non-fetched call which is not - // dispatched (e.g. due to being overweight). - service_task_periodic { - let now = BLOCK_NUMBER.into(); - let task = make_task::(true, false, false, None, 0); - // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - } - - // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. - execute_dispatch_signed { - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; - let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { - } - - // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. - execute_dispatch_unsigned { - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; - let origin = make_origin::(false); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { - } - - schedule { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); - let when = BLOCK_NUMBER.into(); - let periodic = Some((T::BlockNumber::one(), 100)); - let priority = Some(0); - // Essentially a no-op call. - let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - - fill_schedule::(when, s)?; - }: _(RawOrigin::Root, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).agenda.len() == (s + 1) as usize, - "didn't add to schedule" - ); - } - - cancel { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - - fill_schedule::(when, s)?; - assert_eq!(Agenda::::get(when).agenda.len(), s as usize); - }: _(RawOrigin::Root, when, 0) - verify { - ensure!( - Lookup::::get(u32_to_name(0)).is_none(), - "didn't remove from lookup" - ); - // Removed schedule is NONE - ensure!( - Agenda::::get(when).agenda[0].is_none(), - "didn't remove from schedule" - ); - } - - schedule_named { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); - let id = u32_to_name(s); - let when = BLOCK_NUMBER.into(); - let periodic = Some((T::BlockNumber::one(), 100)); - let priority = Some(0); - // Essentially a no-op call. - let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - - fill_schedule::(when, s)?; - }: _(RawOrigin::Root, id, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).agenda.len() == (s + 1) as usize, - "didn't add to schedule" - ); - } - - cancel_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - - fill_schedule::(when, s)?; - }: _(RawOrigin::Root, u32_to_name(0)) - verify { - ensure!( - Lookup::::get(u32_to_name(0)).is_none(), - "didn't remove from lookup" - ); - // Removed schedule is NONE - ensure!( - Agenda::::get(when).agenda[0].is_none(), - "didn't remove from schedule" - ); - } - - change_named_priority { - let origin: RawOrigin = frame_system::RawOrigin::Root; - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - let idx = s - 1; - let id = u32_to_name(idx); - let priority = 42; - fill_schedule::(when, s)?; - }: _(origin, id, priority) - verify { - ensure!( - Agenda::::get(when).agenda[idx as usize].clone().unwrap().priority == priority, - "didn't change the priority" - ); - } - - impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test); -} diff --git a/pallets/scheduler-v2/src/lib.rs b/pallets/scheduler-v2/src/lib.rs deleted file mode 100644 index 731d1fd55c..0000000000 --- a/pallets/scheduler-v2/src/lib.rs +++ /dev/null @@ -1,1338 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -// Original license: -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Scheduler -//! 
A Pallet for scheduling dispatches. -//! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] -//! -//! ## Overview -//! -//! This Pallet exposes capabilities for scheduling dispatches to occur at a -//! specified block number or at a specified period. These scheduled dispatches -//! may be named or anonymous and may be canceled. -//! -//! **NOTE:** The scheduled calls will be dispatched with the default filter -//! for the origin: namely `frame_system::Config::BaseCallFilter` for all origin -//! except root which will get no filter. And not the filter contained in origin -//! use to call `fn schedule`. -//! -//! If a call is scheduled using proxy or whatever mecanism which adds filter, -//! then those filter will not be used when dispatching the schedule call. -//! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! * `schedule` - schedule a dispatch, which may be periodic, to occur at a specified block and -//! with a specified priority. -//! * `cancel` - cancel a scheduled dispatch, specified by block number and index. -//! * `schedule_named` - augments the `schedule` interface with an additional `Vec` parameter -//! that can be used for identification. -//! * `cancel_named` - the named complement to the cancel function. - -// Ensure we're `no_std` when compiling for Wasm. -#![cfg_attr(not(feature = "std"), no_std)] -#![deny(missing_docs)] - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; -// We dont use this pallet right now -#[allow(deprecated)] -pub mod weights; - -use codec::{Codec, Decode, Encode, MaxEncodedLen}; -use frame_support::{ - dispatch::{ - DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter, PostDispatchInfo, - }, - traits::{ - schedule::{self, DispatchTime, LOWEST_PRIORITY}, - EnsureOrigin, Get, IsType, OriginTrait, PrivilegeCmp, StorageVersion, PreimageRecipient, - ConstU32, UnfilteredDispatchable, - }, - weights::Weight, - unsigned::TransactionValidityError, -}; - -use frame_system::{self as system}; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{BadOrigin, One, Saturating, Zero, Hash}, - BoundedVec, RuntimeDebug, DispatchErrorWithPostInfo, -}; -use sp_core::H160; -use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; -pub use weights::WeightInfo; - -pub use pallet::*; - -/// Just a simple index for naming period tasks. -pub type PeriodicIndex = u32; -/// The location of a scheduled task that can be used to remove it. -pub type TaskAddress = (BlockNumber, u32); - -/// A an encoded bounded `Call`. Its encoding must be at most 128 bytes. -pub type EncodedCall = BoundedVec>; - -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] -#[scale_info(skip_type_params(T))] -/// A scheduled call is stored as is or as a preimage hash to lookup. -/// This enum represents both variants. -pub enum ScheduledCall { - /// A an encoded bounded `Call`. Its encoding must be at most 128 bytes. - Inline(EncodedCall), - - /// A Blake2-256 hash of the call together with an upper limit for its size. - PreimageLookup { - /// A call hash to lookup - hash: T::Hash, - - /// The length of the decoded call - unbounded_len: u32, - }, -} - -impl ScheduledCall { - /// Convert an otherwise unbounded or large value into a type ready for placing in storage. - /// - /// NOTE: Once this API is used, you should use either `drop` or `realize`. 
- pub fn new(call: ::RuntimeCall) -> Result { - let encoded = call.encode(); - let len = encoded.len(); - - match EncodedCall::try_from(encoded.clone()) { - Ok(bounded) => Ok(Self::Inline(bounded)), - Err(_) => { - let hash = ::Hashing::hash_of(&encoded); - ::Preimages::note_preimage( - encoded - .try_into() - .map_err(|_| >::TooBigScheduledCall)?, - ); - - Ok(Self::PreimageLookup { - hash, - unbounded_len: len as u32, - }) - } - } - } - - /// The maximum length of the lookup that is needed to peek `Self`. - pub fn lookup_len(&self) -> Option { - match self { - Self::Inline(..) => None, - Self::PreimageLookup { unbounded_len, .. } => Some(*unbounded_len), - } - } - - /// Returns whether the image will require a lookup to be peeked. - pub fn lookup_needed(&self) -> bool { - match self { - Self::Inline(_) => false, - Self::PreimageLookup { .. } => true, - } - } - - // Decodes a runtime call - fn decode(mut data: &[u8]) -> Result<::RuntimeCall, DispatchError> { - ::RuntimeCall::decode(&mut data) - .map_err(|_| >::ScheduledCallCorrupted.into()) - } -} - -/// Weight Info for the Preimages fetches. -pub trait SchedulerPreimagesWeightInfo { - /// Get the weight of a task fetches with a given decoded length. - fn service_task_fetched(call_length: u32) -> Weight; -} - -impl SchedulerPreimagesWeightInfo for () { - fn service_task_fetched(_call_length: u32) -> Weight { - W::service_task_base() - } -} - -/// A scheduler's interface for managing preimages to hashes -/// and looking up preimages from their hash on-chain. -pub trait SchedulerPreimages: - PreimageRecipient + SchedulerPreimagesWeightInfo -{ - /// No longer request that the data for decoding the given `call` is available. - fn drop(call: &ScheduledCall); - - /// Convert the given `call` instance back into its original instance, also returning the - /// exact size of its encoded form if it needed to be looked-up from a stored preimage. - /// - /// NOTE: This does not remove any data needed for realization. If you will no longer use the - /// `call`, use `realize` instead or use `drop` afterwards. - fn peek( - call: &ScheduledCall, - ) -> Result<(::RuntimeCall, Option), DispatchError>; - - /// Convert the given scheduled `call` value back into its original instance. If successful, - /// `drop` any data backing it. This will not break the realisability of independently - /// created instances of `ScheduledCall` which happen to have identical data. - fn realize( - call: &ScheduledCall, - ) -> Result<(::RuntimeCall, Option), DispatchError>; -} - -impl + SchedulerPreimagesWeightInfo> - SchedulerPreimages for PP -{ - fn drop(call: &ScheduledCall) { - match call { - ScheduledCall::Inline(_) => {} - ScheduledCall::PreimageLookup { hash, .. } => Self::unrequest_preimage(hash), - } - } - - fn peek( - call: &ScheduledCall, - ) -> Result<(::RuntimeCall, Option), DispatchError> { - match call { - ScheduledCall::Inline(data) => Ok((ScheduledCall::::decode(data)?, None)), - ScheduledCall::PreimageLookup { - hash, - unbounded_len, - } => { - let (preimage, len) = Self::get_preimage(hash) - .ok_or(>::PreimageNotFound) - .map(|preimage| (preimage, *unbounded_len))?; - - Ok((ScheduledCall::::decode(preimage.as_slice())?, Some(len))) - } - } - } - - fn realize( - call: &ScheduledCall, - ) -> Result<(::RuntimeCall, Option), DispatchError> { - let r = Self::peek(call)?; - Self::drop(call); - Ok(r) - } -} - -/// Scheduler's supported origins. -pub enum ScheduledEnsureOriginSuccess { - /// A scheduled transaction has the Root origin. 
- Root, - - /// A specific account has signed a scheduled transaction. - Signed(AccountId), -} - -/// An identifier of a scheduled task. -pub type TaskName = [u8; 32]; - -/// Information regarding an item to be executed in the future. -#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] -#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct Scheduled { - /// The unique identity for this task, if there is one. - maybe_id: Option, - - /// This task's priority. - priority: schedule::Priority, - - /// The call to be dispatched. - call: Call, - - /// If the call is periodic, then this points to the information concerning that. - maybe_periodic: Option>, - - /// The origin with which to dispatch the call. - origin: PalletsOrigin, - _phantom: PhantomData, -} - -/// Information regarding an item to be executed in the future. -pub type ScheduledOf = Scheduled< - TaskName, - ScheduledCall, - ::BlockNumber, - ::PalletsOrigin, - ::AccountId, ->; - -#[derive(Encode, Decode, MaxEncodedLen, TypeInfo)] -#[scale_info(skip_type_params(T))] -/// A structure for storing scheduled tasks in a block. -/// The `BlockAgenda` tracks the available free space for a new task in a block.4 -/// -/// The agenda's maximum amount of tasks is `T::MaxScheduledPerBlock`. -pub struct BlockAgenda { - agenda: BoundedVec>, T::MaxScheduledPerBlock>, - free_places: u32, -} - -impl BlockAgenda { - /// Tries to push a new scheduled task into the block's agenda. - /// If there is a free place, the new task will take it, - /// and the `BlockAgenda` will record that the number of free places has decreased. - /// - /// An error containing the scheduled task will be returned if there are no free places. - /// - /// The complexity of the check for the *existence* of a free place is O(1). - /// The complexity of *finding* the free slot is O(n). - fn try_push(&mut self, scheduled: ScheduledOf) -> Result> { - if self.free_places == 0 { - return Err(scheduled); - } - - self.free_places = self.free_places.saturating_sub(1); - - if (self.agenda.len() as u32) < T::MaxScheduledPerBlock::get() { - // will always succeed due to the above check. - let _ = self.agenda.try_push(Some(scheduled)); - Ok((self.agenda.len() - 1) as u32) - } else { - match self.agenda.iter().position(|i| i.is_none()) { - Some(hole_index) => { - self.agenda[hole_index] = Some(scheduled); - Ok(hole_index as u32) - } - None => unreachable!("free_places was greater than 0; qed"), - } - } - } - - /// Sets a slot by the given index and the slot value. - /// - /// ### Panics - /// If the index is out of range, the function will panic. - fn set_slot(&mut self, index: u32, slot: Option>) { - self.agenda[index as usize] = slot; - } - - /// Returns an iterator containing references to the agenda's slots. - fn iter(&self) -> impl Iterator>> + '_ { - self.agenda.iter() - } - - /// Returns an immutable reference to a scheduled task if there is one under the given index. - /// - /// The function returns `None` if: - /// * The `index` is out of range - /// * No scheduled task occupies the agenda slot under the given index. - fn get(&self, index: u32) -> Option<&ScheduledOf> { - match self.agenda.get(index as usize) { - Some(Some(scheduled)) => Some(scheduled), - _ => None, - } - } - - /// Returns a mutable reference to a scheduled task if there is one under the given index. - /// - /// The function returns `None` if: - /// * The `index` is out of range - /// * No scheduled task occupies the agenda slot under the given index. 
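// A simplified standalone model of the `BlockAgenda` slot management above: a bounded vector of
// optional tasks plus a `free_places` counter, so the "is there room?" check is O(1) and only the
// search for a reusable hole is O(n). The capacity constant and the string `Task` alias are
// assumptions of this example.
const MAX_PER_BLOCK: usize = 10;
type Task = &'static str;

struct BlockSlots {
    slots: Vec<Option<Task>>,
    free_places: u32,
}

impl BlockSlots {
    fn new() -> Self {
        Self { slots: Vec::new(), free_places: MAX_PER_BLOCK as u32 }
    }

    fn try_push(&mut self, task: Task) -> Result<u32, Task> {
        if self.free_places == 0 {
            return Err(task); // hand the task back so the caller can try another block
        }
        self.free_places -= 1;
        if self.slots.len() < MAX_PER_BLOCK {
            self.slots.push(Some(task));
            Ok((self.slots.len() - 1) as u32)
        } else {
            // A cancelled task left a hole somewhere; reuse it.
            let hole = self.slots.iter().position(|s| s.is_none()).expect("free_places > 0");
            self.slots[hole] = Some(task);
            Ok(hole as u32)
        }
    }

    fn take(&mut self, index: u32) -> Option<Task> {
        let removed = self.slots.get_mut(index as usize)?.take();
        if removed.is_some() {
            self.free_places += 1;
        }
        removed
    }
}

#[test]
fn cancelled_slots_are_reused() {
    let mut block = BlockSlots::new();
    for _ in 0..MAX_PER_BLOCK {
        block.try_push("task").unwrap();
    }
    assert!(block.try_push("overflow").is_err());
    block.take(3);
    assert_eq!(block.try_push("replacement"), Ok(3)); // the freed hole is filled again
}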
- fn get_mut(&mut self, index: u32) -> Option<&mut ScheduledOf> { - match self.agenda.get_mut(index as usize) { - Some(Some(scheduled)) => Some(scheduled), - _ => None, - } - } - - /// Take a scheduled task by the given index. - /// - /// If there is a task under the index, the function will: - /// * Free the corresponding agenda slot. - /// * Decrease the number of free places. - /// * Return the scheduled task. - /// - /// The function returns `None` if there is no task under the index. - fn take(&mut self, index: u32) -> Option> { - let removed = self.agenda.get_mut(index as usize)?.take(); - - if removed.is_some() { - self.free_places = self.free_places.saturating_add(1); - } - - removed - } -} - -impl Default for BlockAgenda { - fn default() -> Self { - let agenda = Default::default(); - let free_places = T::MaxScheduledPerBlock::get(); - - Self { - agenda, - free_places, - } - } -} -/// A structure for tracking the used weight -/// and checking if it does not exceed the weight limit. -struct WeightCounter { - used: Weight, - limit: Weight, -} - -impl WeightCounter { - /// Checks if the weight `w` can be accommodated by the counter. - /// - /// If there is room for the additional weight `w`, - /// the function will update the used weight and return true. - fn check_accrue(&mut self, w: Weight) -> bool { - let test = self.used.saturating_add(w); - if test.any_gt(self.limit) { - false - } else { - self.used = test; - true - } - } - - /// Checks if the weight `w` can be accommodated by the counter. - fn can_accrue(&mut self, w: Weight) -> bool { - self.used.saturating_add(w).all_lte(self.limit) - } -} - -pub(crate) struct MarginalWeightInfo(sp_std::marker::PhantomData); - -impl MarginalWeightInfo { - /// Return the weight of servicing a single task. - fn service_task(maybe_lookup_len: Option, named: bool, periodic: bool) -> Weight { - let base = T::WeightInfo::service_task_base(); - let mut total = match maybe_lookup_len { - None => base, - Some(l) => T::Preimages::service_task_fetched(l as u32), - }; - if named { - total.saturating_accrue(T::WeightInfo::service_task_named().saturating_sub(base)); - } - if periodic { - total.saturating_accrue(T::WeightInfo::service_task_periodic().saturating_sub(base)); - } - total - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; - use system::pallet_prelude::*; - - /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The aggregated origin which the dispatch will take. - type RuntimeOrigin: OriginTrait - + From - + IsType<::RuntimeOrigin> - + Clone; - - /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: From> - + Codec - + Clone - + Eq - + TypeInfo - + MaxEncodedLen; - - /// The aggregated call type. - type RuntimeCall: Parameter - + Dispatchable< - RuntimeOrigin = ::RuntimeOrigin, - PostInfo = PostDispatchInfo, - > + UnfilteredDispatchable::RuntimeOrigin> - + GetDispatchInfo - + From>; - - /// The maximum weight that may be scheduled per block for any dispatchables. - #[pallet::constant] - type MaximumWeight: Get; - - /// Required origin to schedule or cancel calls. 
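// A plain-integer sketch of the `WeightCounter` bookkeeping above, with `u64` standing in for
// `Weight`: `check_accrue` commits the weight if it still fits under the limit, while
// `can_accrue` only asks whether it would fit. Saturating arithmetic mirrors the original; the
// concrete numbers below are illustrative only.
struct Counter {
    used: u64,
    limit: u64,
}

impl Counter {
    fn check_accrue(&mut self, w: u64) -> bool {
        let test = self.used.saturating_add(w);
        if test > self.limit {
            false
        } else {
            self.used = test; // commit the weight
            true
        }
    }

    fn can_accrue(&self, w: u64) -> bool {
        self.used.saturating_add(w) <= self.limit
    }
}

#[test]
fn counter_commits_only_on_check_accrue() {
    let mut c = Counter { used: 0, limit: 10 };
    assert!(c.can_accrue(8));
    assert_eq!(c.used, 0);       // can_accrue never commits
    assert!(c.check_accrue(8));
    assert!(!c.check_accrue(5)); // 8 + 5 > 10: rejected and not committed
    assert_eq!(c.used, 8);
}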
- type ScheduleOrigin: EnsureOrigin< - ::RuntimeOrigin, - Success = ScheduledEnsureOriginSuccess, - >; - - /// Compare the privileges of origins. - /// - /// This will be used when canceling a task, to ensure that the origin that tries - /// to cancel has greater or equal privileges as the origin that created the scheduled task. - /// - /// For simplicity the [`EqualPrivilegeOnly`](frame_support::traits::EqualPrivilegeOnly) can - /// be used. This will only check if two given origins are equal. - type OriginPrivilegeCmp: PrivilegeCmp; - - /// The maximum number of scheduled calls in the queue for a single block. - #[pallet::constant] - type MaxScheduledPerBlock: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// The preimage provider with which we look up call hashes to get the call. - type Preimages: SchedulerPreimages; - - /// The helper type used for custom transaction fee logic. - type CallExecutor: DispatchCall; - - /// Required origin to set/change calls' priority. - type PrioritySetOrigin: EnsureOrigin<::RuntimeOrigin>; - } - - /// It contains the block number from which we should service tasks. - /// It's used for delaying the servicing of future blocks' agendas if we had overweight tasks. - #[pallet::storage] - pub type IncompleteSince = StorageValue<_, T::BlockNumber>; - - /// Items to be executed, indexed by the block number that they should be executed on. - #[pallet::storage] - pub type Agenda = - StorageMap<_, Twox64Concat, T::BlockNumber, BlockAgenda, ValueQuery>; - - /// Lookup from a name to the block number and index of the task. - #[pallet::storage] - pub(crate) type Lookup = - StorageMap<_, Twox64Concat, TaskName, TaskAddress>; - - /// Events type. - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// Scheduled some task. - Scheduled { - /// The block number in which the scheduled task should be executed. - when: T::BlockNumber, - - /// The index of the block's agenda slot. - index: u32, - }, - /// Canceled some task. - Canceled { - /// The block number in which the canceled task has been. - when: T::BlockNumber, - - /// The index of the block's agenda slot that had become available. - index: u32, - }, - /// Dispatched some task. - Dispatched { - /// The task's address - the block number and the block's agenda index. - task: TaskAddress, - - /// The task's name if it is not anonymous. - id: Option<[u8; 32]>, - - /// The task's execution result. - result: DispatchResult, - }, - /// Scheduled task's priority has changed - PriorityChanged { - /// The task's address - the block number and the block's agenda index. - task: TaskAddress, - - /// The new priority of the task. - priority: schedule::Priority, - }, - /// The call for the provided hash was not found so the task has been aborted. - CallUnavailable { - /// The task's address - the block number and the block's agenda index. - task: TaskAddress, - - /// The task's name if it is not anonymous. - id: Option<[u8; 32]>, - }, - /// The given task can never be executed since it is overweight. - PermanentlyOverweight { - /// The task's address - the block number and the block's agenda index. - task: TaskAddress, - - /// The task's name if it is not anonymous. 
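// A standalone sketch of how the two storage items above relate: `Agenda` is keyed by block
// number and holds the per-block slots, while `Lookup` maps a 32-byte task name to the
// `(block, index)` address of its slot. The std map types and the `String` payload are
// assumptions of this example; on-chain these are `StorageMap`s holding `BlockAgenda` values.
use std::collections::{BTreeMap, HashMap};

type Name = [u8; 32];
type Address = (u64, u32); // (block number, slot index)

struct SchedulerState {
    agenda: BTreeMap<u64, Vec<Option<String>>>,
    lookup: HashMap<Name, Address>,
}

impl SchedulerState {
    // Resolve a named task to its scheduled entry, if it is still queued.
    fn find_named(&self, id: &Name) -> Option<&String> {
        let (when, index) = self.lookup.get(id)?;
        self.agenda.get(when)?.get(*index as usize)?.as_ref()
    }
}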
- id: Option<[u8; 32]>, - }, - } - - #[pallet::error] - pub enum Error { - /// Failed to schedule a call - FailedToSchedule, - /// There is no place for a new task in the agenda - AgendaIsExhausted, - /// Scheduled call is corrupted - ScheduledCallCorrupted, - /// Scheduled call preimage is not found - PreimageNotFound, - /// Scheduled call is too big - TooBigScheduledCall, - /// Cannot find the scheduled call. - NotFound, - /// Given target block number is in the past. - TargetBlockNumberInPast, - /// Attempt to use a non-named function on a named task. - Named, - } - - #[pallet::hooks] - impl Hooks> for Pallet { - /// Execute the scheduled calls - fn on_initialize(now: T::BlockNumber) -> Weight { - let mut weight_counter = WeightCounter { - used: Weight::zero(), - limit: T::MaximumWeight::get(), - }; - Self::service_agendas(&mut weight_counter, now, u32::max_value()); - weight_counter.used - } - } - - #[pallet::call] - impl Pallet { - /// Anonymously schedule a task. - /// - /// Only `T::ScheduleOrigin` is allowed to schedule a task. - /// Only `T::PrioritySetOrigin` is allowed to set the task's priority. - #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] - pub fn schedule( - origin: OriginFor, - when: T::BlockNumber, - maybe_periodic: Option>, - priority: Option, - call: Box<::RuntimeCall>, - ) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - - if priority.is_some() { - T::PrioritySetOrigin::ensure_origin(origin.clone())?; - } - - let origin = ::RuntimeOrigin::from(origin); - Self::do_schedule( - DispatchTime::At(when), - maybe_periodic, - priority.unwrap_or(LOWEST_PRIORITY), - origin.caller().clone(), - >::new(*call)?, - )?; - Ok(()) - } - - /// Cancel an anonymously scheduled task. - /// - /// The `T::OriginPrivilegeCmp` decides whether the given origin is allowed to cancel the task or not. - #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] - pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); - Self::do_cancel(Some(origin.caller().clone()), (when, index))?; - Ok(()) - } - - /// Schedule a named task. - /// - /// Only `T::ScheduleOrigin` is allowed to schedule a task. - /// Only `T::PrioritySetOrigin` is allowed to set the task's priority. - #[pallet::call_index(2)] - #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] - pub fn schedule_named( - origin: OriginFor, - id: TaskName, - when: T::BlockNumber, - maybe_periodic: Option>, - priority: Option, - call: Box<::RuntimeCall>, - ) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - - if priority.is_some() { - T::PrioritySetOrigin::ensure_origin(origin.clone())?; - } - - let origin = ::RuntimeOrigin::from(origin); - Self::do_schedule_named( - id, - DispatchTime::At(when), - maybe_periodic, - priority.unwrap_or(LOWEST_PRIORITY), - origin.caller().clone(), - >::new(*call)?, - )?; - Ok(()) - } - - /// Cancel a named scheduled task. - /// - /// The `T::OriginPrivilegeCmp` decides whether the given origin is allowed to cancel the task or not. 
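// A simplified sketch of the origin checks the dispatchables above perform: any caller accepted
// by the schedule origin may schedule, but supplying an explicit priority additionally requires
// the priority-set origin; otherwise the task falls back to the lowest priority (255). The two
// boolean flags stand in for the `EnsureOrigin` checks and are assumptions of this example.
const LOWEST_PRIORITY: u8 = 255;

fn effective_priority(
    is_schedule_origin: bool,
    is_priority_origin: bool,
    requested: Option<u8>,
) -> Result<u8, &'static str> {
    if !is_schedule_origin {
        return Err("BadOrigin: not allowed to schedule");
    }
    match requested {
        Some(p) if is_priority_origin => Ok(p),
        Some(_) => Err("BadOrigin: not allowed to set a priority"),
        None => Ok(LOWEST_PRIORITY),
    }
}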
- #[pallet::call_index(3)] - #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] - pub fn cancel_named(origin: OriginFor, id: TaskName) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); - Self::do_cancel_named(Some(origin.caller().clone()), id)?; - Ok(()) - } - - /// Anonymously schedule a task after a delay. - /// - /// # - /// Same as [`schedule`]. - /// # - #[pallet::call_index(4)] - #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] - pub fn schedule_after( - origin: OriginFor, - after: T::BlockNumber, - maybe_periodic: Option>, - priority: Option, - call: Box<::RuntimeCall>, - ) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - - if priority.is_some() { - T::PrioritySetOrigin::ensure_origin(origin.clone())?; - } - - let origin = ::RuntimeOrigin::from(origin); - Self::do_schedule( - DispatchTime::After(after), - maybe_periodic, - priority.unwrap_or(LOWEST_PRIORITY), - origin.caller().clone(), - >::new(*call)?, - )?; - Ok(()) - } - - /// Schedule a named task after a delay. - /// - /// Only `T::ScheduleOrigin` is allowed to schedule a task. - /// Only `T::PrioritySetOrigin` is allowed to set the task's priority. - /// - /// # - /// Same as [`schedule_named`](Self::schedule_named). - /// # - #[pallet::call_index(5)] - #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] - pub fn schedule_named_after( - origin: OriginFor, - id: TaskName, - after: T::BlockNumber, - maybe_periodic: Option>, - priority: Option, - call: Box<::RuntimeCall>, - ) -> DispatchResult { - T::ScheduleOrigin::ensure_origin(origin.clone())?; - - if priority.is_some() { - T::PrioritySetOrigin::ensure_origin(origin.clone())?; - } - - let origin = ::RuntimeOrigin::from(origin); - Self::do_schedule_named( - id, - DispatchTime::After(after), - maybe_periodic, - priority.unwrap_or(LOWEST_PRIORITY), - origin.caller().clone(), - >::new(*call)?, - )?; - Ok(()) - } - - /// Change a named task's priority. - /// - /// Only the `T::PrioritySetOrigin` is allowed to change the task's priority. - #[pallet::call_index(6)] - #[pallet::weight(::WeightInfo::change_named_priority(T::MaxScheduledPerBlock::get()))] - pub fn change_named_priority( - origin: OriginFor, - id: TaskName, - priority: schedule::Priority, - ) -> DispatchResult { - T::PrioritySetOrigin::ensure_origin(origin.clone())?; - let origin = ::RuntimeOrigin::from(origin); - Self::do_change_named_priority(origin.caller().clone(), id, priority) - } - } -} - -impl Pallet { - /// Converts the `DispatchTime` to the `BlockNumber`. - /// - /// Returns an error if the block number is in the past. - fn resolve_time(when: DispatchTime) -> Result { - let now = frame_system::Pallet::::block_number(); - - let when = match when { - DispatchTime::At(x) => x, - // The current block has already completed it's scheduled tasks, so - // Schedule the task at lest one block after this current block. - DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()), - }; - - if when <= now { - return Err(Error::::TargetBlockNumberInPast.into()); - } - - Ok(when) - } - - /// Places the mandatory task. - /// - /// It will try to place the task into the block pointed by the `when` parameter. - /// - /// If the block has no room for a task, - /// the function will search for a future block that can accommodate the task. 
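// A standalone model of `resolve_time` above, with `u64` block numbers: `At(x)` is used as given,
// `After(x)` resolves to `now + x + 1` (so `After(0)` means "the next block"), and anything not
// strictly in the future is rejected.
enum When {
    At(u64),
    After(u64),
}

fn resolve_time(now: u64, when: When) -> Result<u64, &'static str> {
    let when = match when {
        When::At(x) => x,
        When::After(x) => now.saturating_add(x).saturating_add(1),
    };
    if when <= now {
        return Err("TargetBlockNumberInPast");
    }
    Ok(when)
}

#[test]
fn after_zero_means_next_block() {
    assert_eq!(resolve_time(2, When::After(0)), Ok(3));
    assert!(resolve_time(5, When::At(5)).is_err());
}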
- fn mandatory_place_task(when: T::BlockNumber, what: ScheduledOf) { - Self::place_task(when, what, true).expect("mandatory place task always succeeds; qed"); - } - - /// Tries to place a task `what` into the given block `when`. - /// - /// Returns an error if the block has no room for the task. - fn try_place_task( - when: T::BlockNumber, - what: ScheduledOf, - ) -> Result, DispatchError> { - Self::place_task(when, what, false) - } - - /// If `is_mandatory` is true, the function behaves like [`mandatory_place_task`](Self::mandatory_place_task); - /// otherwise it acts like [`try_place_task`](Self::try_place_task). - /// - /// The function also updates the `Lookup` storage. - fn place_task( - mut when: T::BlockNumber, - what: ScheduledOf, - is_mandatory: bool, - ) -> Result, DispatchError> { - let maybe_name = what.maybe_id; - let index = Self::push_to_agenda(&mut when, what, is_mandatory)?; - let address = (when, index); - if let Some(name) = maybe_name { - Lookup::::insert(name, address) - } - Self::deposit_event(Event::Scheduled { - when: address.0, - index: address.1, - }); - Ok(address) - } - - /// Pushes the scheduled task into the block's agenda. - /// - /// If `is_mandatory` is true, it searches for a block with a free slot for the given task. - /// - /// If `is_mandatory` is false and there is no free slot, the function returns an error. - fn push_to_agenda( - when: &mut T::BlockNumber, - mut what: ScheduledOf, - is_mandatory: bool, - ) -> Result { - let mut agenda; - - let index = loop { - agenda = Agenda::::get(*when); - - match agenda.try_push(what) { - Ok(index) => break index, - Err(returned_what) if is_mandatory => { - what = returned_what; - when.saturating_inc(); - } - Err(_) => return Err(>::AgendaIsExhausted.into()), - } - }; - - Agenda::::insert(when, agenda); - Ok(index) - } - - fn do_schedule( - when: DispatchTime, - maybe_periodic: Option>, - priority: schedule::Priority, - origin: T::PalletsOrigin, - call: ScheduledCall, - ) -> Result, DispatchError> { - let when = Self::resolve_time(when)?; - - // sanitize maybe_periodic - let maybe_periodic = maybe_periodic - .filter(|p| p.1 > 1 && !p.0.is_zero()) - // Remove one from the number of repetitions since we will schedule one now. 
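// A sketch of the placement loop above, with per-block counters standing in for agendas: a
// mandatory task (for example the next occurrence of a periodic task) walks forward block by
// block until some agenda has room, while an ordinary task fails immediately if its target block
// is full. The capacity constant is an assumption of this example.
use std::collections::BTreeMap;

const CAPACITY: u32 = 10;

fn place(
    used_slots: &mut BTreeMap<u64, u32>,
    mut when: u64,
    is_mandatory: bool,
) -> Result<(u64, u32), &'static str> {
    loop {
        let used = used_slots.entry(when).or_insert(0);
        if *used < CAPACITY {
            let index = *used;
            *used += 1;
            return Ok((when, index));
        }
        if !is_mandatory {
            return Err("AgendaIsExhausted");
        }
        when += 1; // mandatory placement spills over into the next block
    }
}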
- .map(|(p, c)| (p, c - 1)); - let task = Scheduled { - maybe_id: None, - priority, - call, - maybe_periodic, - origin, - _phantom: PhantomData, - }; - Self::try_place_task(when, task) - } - - fn do_cancel( - origin: Option, - (when, index): TaskAddress, - ) -> Result<(), DispatchError> { - let scheduled = Agenda::::try_mutate( - when, - |agenda| -> Result>, DispatchError> { - let scheduled = match agenda.get(index) { - Some(scheduled) => scheduled, - None => return Ok(None), - }; - - if let Some(ref o) = origin { - if matches!( - T::OriginPrivilegeCmp::cmp_privilege(o, &scheduled.origin), - Some(Ordering::Less) | None - ) { - return Err(BadOrigin.into()); - } - } - - Ok(agenda.take(index)) - }, - )?; - if let Some(s) = scheduled { - T::Preimages::drop(&s.call); - - if let Some(id) = s.maybe_id { - Lookup::::remove(id); - } - Self::deposit_event(Event::Canceled { when, index }); - Ok(()) - } else { - Err(Error::::NotFound.into()) - } - } - - fn do_schedule_named( - id: TaskName, - when: DispatchTime, - maybe_periodic: Option>, - priority: schedule::Priority, - origin: T::PalletsOrigin, - call: ScheduledCall, - ) -> Result, DispatchError> { - // ensure id it is unique - if Lookup::::contains_key(id) { - return Err(Error::::FailedToSchedule.into()); - } - - let when = Self::resolve_time(when)?; - - // sanitize maybe_periodic - let maybe_periodic = maybe_periodic - .filter(|p| p.1 > 1 && !p.0.is_zero()) - // Remove one from the number of repetitions since we will schedule one now. - .map(|(p, c)| (p, c - 1)); - - let task = Scheduled { - maybe_id: Some(id), - priority, - call, - maybe_periodic, - origin, - _phantom: Default::default(), - }; - Self::try_place_task(when, task) - } - - fn do_cancel_named(origin: Option, id: TaskName) -> DispatchResult { - Lookup::::try_mutate_exists(id, |lookup| -> DispatchResult { - if let Some((when, index)) = lookup.take() { - Agenda::::try_mutate(when, |agenda| -> DispatchResult { - let scheduled = match agenda.get(index) { - Some(scheduled) => scheduled, - None => return Ok(()), - }; - - if let Some(ref o) = origin { - if matches!( - T::OriginPrivilegeCmp::cmp_privilege(o, &scheduled.origin), - Some(Ordering::Less) | None - ) { - return Err(BadOrigin.into()); - } - T::Preimages::drop(&scheduled.call); - } - - agenda.take(index); - - Ok(()) - })?; - Self::deposit_event(Event::Canceled { when, index }); - Ok(()) - } else { - Err(Error::::NotFound.into()) - } - }) - } - - fn do_change_named_priority( - origin: T::PalletsOrigin, - id: TaskName, - priority: schedule::Priority, - ) -> DispatchResult { - match Lookup::::get(id) { - Some((when, index)) => Agenda::::try_mutate(when, |agenda| { - let scheduled = match agenda.get_mut(index) { - Some(scheduled) => scheduled, - None => return Ok(()), - }; - - if matches!( - T::OriginPrivilegeCmp::cmp_privilege(&origin, &scheduled.origin), - Some(Ordering::Less) | None - ) { - return Err(BadOrigin.into()); - } - - scheduled.priority = priority; - Self::deposit_event(Event::PriorityChanged { - task: (when, index), - priority, - }); - - Ok(()) - }), - None => Err(Error::::NotFound.into()), - } - } -} - -enum ServiceTaskError { - /// Could not be executed due to missing preimage. - Unavailable, - /// Could not be executed due to weight limitations. - Overweight, -} -use ServiceTaskError::*; - -/// A Scheduler-Runtime interface for finer payment handling. -pub trait DispatchCall { - /// Resolve the call dispatch, including any post-dispatch operations. 
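// A sketch of the cancellation permission rule used by `do_cancel` and `do_cancel_named` above:
// the canceller's privilege is compared against the origin that scheduled the task, and anything
// strictly lower, or incomparable, is refused. The two-level privilege enum and the always-`Some`
// comparison stand in for `T::OriginPrivilegeCmp` and are assumptions of this example.
use std::cmp::Ordering;

#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum Privilege {
    Signed,
    Root,
}

fn cmp_privilege(a: &Privilege, b: &Privilege) -> Option<Ordering> {
    Some(a.cmp(b)) // a real runtime may return None for incomparable origins
}

fn may_cancel(canceller: &Privilege, task_origin: &Privilege) -> bool {
    !matches!(cmp_privilege(canceller, task_origin), Some(Ordering::Less) | None)
}

#[test]
fn only_equal_or_higher_privilege_may_cancel() {
    assert!(may_cancel(&Privilege::Root, &Privilege::Signed));
    assert!(may_cancel(&Privilege::Signed, &Privilege::Signed));
    assert!(!may_cancel(&Privilege::Signed, &Privilege::Root));
}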
- fn dispatch_call( - signer: Option, - function: ::RuntimeCall, - ) -> Result< - Result>, - TransactionValidityError, - >; -} - -impl Pallet { - /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. - fn service_agendas(weight: &mut WeightCounter, now: T::BlockNumber, max: u32) { - if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { - return; - } - - let mut incomplete_since = now + One::one(); - let mut when = IncompleteSince::::take().unwrap_or(now); - let mut executed = 0; - - let max_items = T::MaxScheduledPerBlock::get(); - let mut count_down = max; - let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); - while count_down > 0 && when <= now && weight.can_accrue(service_agenda_base_weight) { - if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { - incomplete_since = incomplete_since.min(when); - } - when.saturating_inc(); - count_down.saturating_dec(); - } - incomplete_since = incomplete_since.min(when); - if incomplete_since <= now { - IncompleteSince::::put(incomplete_since); - } - } - - /// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a - /// later block. - fn service_agenda( - weight: &mut WeightCounter, - executed: &mut u32, - now: T::BlockNumber, - when: T::BlockNumber, - max: u32, - ) -> bool { - let mut agenda = Agenda::::get(when); - let mut ordered = agenda - .iter() - .enumerate() - .filter_map(|(index, maybe_item)| { - maybe_item - .as_ref() - .map(|item| (index as u32, item.priority)) - }) - .collect::>(); - ordered.sort_by_key(|k| k.1); - let within_limit = - weight.check_accrue(T::WeightInfo::service_agenda_base(ordered.len() as u32)); - debug_assert!( - within_limit, - "weight limit should have been checked in advance" - ); - - // Items which we know can be executed and have postponed for execution in a later block. - let mut postponed = (ordered.len() as u32).saturating_sub(max); - // Items which we don't know can ever be executed. - let mut dropped = 0; - - for (agenda_index, _) in ordered.into_iter().take(max as usize) { - let task = match agenda.take(agenda_index).take() { - None => continue, - Some(t) => t, - }; - let base_weight = MarginalWeightInfo::::service_task( - task.call.lookup_len().map(|x| x as usize), - task.maybe_id.is_some(), - task.maybe_periodic.is_some(), - ); - if !weight.can_accrue(base_weight) { - postponed += 1; - break; - } - let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); - match result { - Err((Unavailable, slot)) => { - dropped += 1; - agenda.set_slot(agenda_index, slot); - } - Err((Overweight, slot)) => { - postponed += 1; - agenda.set_slot(agenda_index, slot); - } - Ok(()) => { - *executed += 1; - } - }; - } - if postponed > 0 || dropped > 0 { - Agenda::::insert(when, agenda); - } else { - Agenda::::remove(when); - } - postponed == 0 - } - - /// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter. - /// - /// This involves: - /// - removing and potentially replacing the `Lookup` entry for the task. - /// - realizing the task's call which can include a preimage lookup. - /// - Rescheduling the task for execution in a later agenda if periodic. 
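// A standalone sketch of the per-block servicing order above: occupied slots are sorted by
// priority (a lower value is more urgent) and executed while the remaining weight budget allows;
// whatever does not fit is postponed instead of being lost. Weights and the budget are plain
// integers here, and the early-exit details of the real loop are simplified away.
fn service_block(tasks: &[(u32, u8, u64)], mut budget: u64) -> (Vec<u32>, Vec<u32>) {
    // tasks: (slot index, priority, weight) -> (executed slots, postponed slots)
    let mut ordered: Vec<_> = tasks.to_vec();
    ordered.sort_by_key(|&(_, priority, _)| priority);

    let (mut executed, mut postponed) = (Vec::new(), Vec::new());
    for (index, _, weight) in ordered {
        if weight <= budget {
            budget -= weight;
            executed.push(index);
        } else {
            postponed.push(index);
        }
    }
    (executed, postponed)
}

#[test]
fn more_urgent_tasks_run_first_within_the_budget() {
    // With a budget of 10, the priority-126 task runs alone and the other two wait for later.
    let (executed, postponed) = service_block(&[(0, 255, 4), (1, 127, 4), (2, 126, 8)], 10);
    assert_eq!(executed, vec![2]);
    assert_eq!(postponed, vec![1, 0]);
}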
- fn service_task( - weight: &mut WeightCounter, - now: T::BlockNumber, - when: T::BlockNumber, - agenda_index: u32, - is_first: bool, - mut task: ScheduledOf, - ) -> Result<(), (ServiceTaskError, Option>)> { - let (call, lookup_len) = match T::Preimages::peek(&task.call) { - Ok(c) => c, - Err(_) => { - if let Some(ref id) = task.maybe_id { - Lookup::::remove(id); - } - - return Err((Unavailable, Some(task))); - } - }; - - weight.check_accrue(MarginalWeightInfo::::service_task( - lookup_len.map(|x| x as usize), - task.maybe_id.is_some(), - task.maybe_periodic.is_some(), - )); - - match Self::execute_dispatch(weight, task.origin.clone(), call) { - Err(Unavailable) => { - debug_assert!(false, "Checked to exist with `peek`"); - - if let Some(ref id) = task.maybe_id { - Lookup::::remove(id); - } - - Self::deposit_event(Event::CallUnavailable { - task: (when, agenda_index), - id: task.maybe_id, - }); - Err((Unavailable, Some(task))) - } - Err(Overweight) if is_first && !Self::is_runtime_upgraded() => { - T::Preimages::drop(&task.call); - - if let Some(ref id) = task.maybe_id { - Lookup::::remove(id); - } - - Self::deposit_event(Event::PermanentlyOverweight { - task: (when, agenda_index), - id: task.maybe_id, - }); - Err((Unavailable, Some(task))) - } - Err(Overweight) => { - // Preserve Lookup -- the task will be postponed. - Err((Overweight, Some(task))) - } - Ok(result) => { - Self::deposit_event(Event::Dispatched { - task: (when, agenda_index), - id: task.maybe_id, - result, - }); - - let is_canceled = task - .maybe_id - .as_ref() - .map(|id| !Lookup::::contains_key(id)) - .unwrap_or(false); - - match &task.maybe_periodic { - &Some((period, count)) if !is_canceled => { - if count > 1 { - task.maybe_periodic = Some((period, count - 1)); - } else { - task.maybe_periodic = None; - } - let wake = now.saturating_add(period); - Self::mandatory_place_task(wake, task); - } - _ => { - if let Some(ref id) = task.maybe_id { - Lookup::::remove(id); - } - - T::Preimages::drop(&task.call) - } - } - Ok(()) - } - } - } - - fn is_runtime_upgraded() -> bool { - let last = system::LastRuntimeUpgrade::::get(); - let current = T::Version::get(); - - last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) - } - - /// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight` - /// counter does not exceed its limit and that it is counted accurately (e.g. accounted using - /// post info if available). - /// - /// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the - /// call itself). - fn execute_dispatch( - weight: &mut WeightCounter, - origin: T::PalletsOrigin, - call: ::RuntimeCall, - ) -> Result { - let dispatch_origin: ::RuntimeOrigin = origin.into(); - let base_weight = match dispatch_origin.clone().as_signed() { - Some(_) => T::WeightInfo::execute_dispatch_signed(), - _ => T::WeightInfo::execute_dispatch_unsigned(), - }; - let call_weight = call.get_dispatch_info().weight; - // We only allow a scheduled call if it cannot push the weight past the limit. 
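// A sketch of the periodic bookkeeping in the `Ok` branch above: after a successful dispatch, a
// periodic task with repetitions left is re-queued `period` blocks ahead with its counter
// decremented, the last repetition is re-queued as a one-shot, and a task whose name was removed
// from `Lookup` (i.e. cancelled mid-flight) is not re-queued at all. Plain integers stand in for
// block numbers.
fn reschedule(
    now: u64,
    maybe_periodic: Option<(u64, u32)>, // (period, remaining repetitions)
    is_canceled: bool,
) -> Option<(u64, Option<(u64, u32)>)> {
    match maybe_periodic {
        Some((period, count)) if !is_canceled => {
            let next = if count > 1 { Some((period, count - 1)) } else { None };
            Some((now + period, next)) // (wake block, updated periodic info)
        }
        _ => None, // one-shot, exhausted, or cancelled: nothing to re-queue
    }
}

#[test]
fn last_repetition_is_requeued_as_one_shot() {
    assert_eq!(reschedule(10, Some((3, 2)), false), Some((13, Some((3, 1)))));
    assert_eq!(reschedule(13, Some((3, 1)), false), Some((16, None)));
    assert_eq!(reschedule(10, Some((3, 2)), true), None);
}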
- let max_weight = base_weight.saturating_add(call_weight); - - if !weight.can_accrue(max_weight) { - return Err(Overweight); - } - - let ensured_origin = T::ScheduleOrigin::ensure_origin(dispatch_origin.into()); - - let r = match ensured_origin { - Ok(ScheduledEnsureOriginSuccess::Root) => { - Ok(call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into())) - } - Ok(ScheduledEnsureOriginSuccess::Signed(sender)) => { - // Execute transaction via chain default pipeline - // That means dispatch will be processed like any user's extrinsic e.g. transaction fees will be taken - T::CallExecutor::dispatch_call(Some(sender), call) - } - Err(e) => Ok(Err(e.into())), - }; - - let (maybe_actual_call_weight, result) = match r { - Ok(result) => match result { - Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => ( - error_and_info.post_info.actual_weight, - Err(error_and_info.error), - ), - }, - Err(_) => { - log::error!( - target: "runtime::scheduler", - "Warning: Scheduler has failed to execute a post-dispatch transaction. \ - This block might have become invalid."); - (None, Err(DispatchError::CannotLookup)) - } - }; - let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); - weight.check_accrue(base_weight); - weight.check_accrue(call_weight); - Ok(result) - } -} diff --git a/pallets/scheduler-v2/src/mock.rs b/pallets/scheduler-v2/src/mock.rs deleted file mode 100644 index fd8a839643..0000000000 --- a/pallets/scheduler-v2/src/mock.rs +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -// Original license: -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Scheduler test environment. -#![allow(deprecated)] - -use super::*; - -use crate as scheduler; -use frame_support::{ - ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnFinalize, OnInitialize}, - weights::constants::RocksDbWeight, -}; -use frame_system::{EnsureRoot, RawOrigin}; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -// Logger module to track execution. 
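// A sketch of the dispatch routing above: a task scheduled by Root bypasses the call filter, a
// task scheduled by a signed account goes through the runtime's normal dispatch pipeline (so
// transaction fees are charged), and the weight that is finally recorded prefers the
// post-dispatch actual weight over the pre-dispatch estimate. The closures stand in for the two
// execution paths and are assumptions of this example.
enum ScheduledOrigin {
    Root,
    Signed(u64),
}

fn dispatch_and_account(
    origin: ScheduledOrigin,
    estimated_weight: u64,
    run_as_root: impl FnOnce() -> Option<u64>,      // returns the actual weight, if reported
    run_as_signed: impl FnOnce(u64) -> Option<u64>, // takes the signer; fees apply on this path
) -> u64 {
    let actual = match origin {
        ScheduledOrigin::Root => run_as_root(),
        ScheduledOrigin::Signed(who) => run_as_signed(who),
    };
    // Account the reported post-dispatch weight when available, else fall back to the estimate.
    actual.unwrap_or(estimated_weight)
}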
-#[frame_support::pallet] -pub mod logger { - use super::{OriginCaller, OriginTrait}; - use frame_support::{pallet_prelude::*, parameter_types}; - use frame_system::pallet_prelude::*; - - parameter_types! { - static Log: Vec<(OriginCaller, u32)> = Vec::new(); - } - pub fn log() -> Vec<(OriginCaller, u32)> { - Log::get().clone() - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - Logged(u32, Weight), - } - - #[pallet::call] - impl Pallet - where - ::RuntimeOrigin: OriginTrait, - { - #[pallet::call_index(0)] - #[pallet::weight(*weight)] - pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { - Self::deposit_event(Event::Logged(i, weight)); - Log::mutate(|log| { - log.push((origin.caller().clone(), i)); - }); - Ok(()) - } - - #[pallet::call_index(1)] - #[pallet::weight(*weight)] - pub fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { - Self::deposit_event(Event::Logged(i, weight)); - Log::mutate(|log| { - log.push((origin.caller().clone(), i)); - }); - Ok(()) - } - } -} - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Logger: logger::{Pallet, Call, Event}, - Scheduler: scheduler::{Pallet, Call, Storage, Event}, - } -); - -// Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. -pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &RuntimeCall) -> bool { - !matches!(call, RuntimeCall::Logger(LoggerCall::log { .. })) - } -} - -parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - Weight::from_ref_time(2_000_000_000_000).set_proof_size(u64::MAX) - ); -} -impl system::Config for Test { - type BaseCallFilter = BaseFilter; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} -impl logger::Config for Test { - type RuntimeEvent = RuntimeEvent; -} -ord_parameter_types! 
{ - pub const One: u64 = 1; -} - -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn service_agendas_base() -> Weight { - Weight::from_ref_time(0b0000_0001) - } - fn service_agenda_base(i: u32) -> Weight { - Weight::from_ref_time((i << 8) as u64 + 0b0000_0010) - } - fn service_task_base() -> Weight { - Weight::from_ref_time(0b0000_0100) - } - fn service_task_periodic() -> Weight { - Weight::from_ref_time(0b0000_1100) - } - fn service_task_named() -> Weight { - Weight::from_ref_time(0b0001_0100) - } - // fn service_task_fetched(s: u32) -> Weight { - // Weight::from_ref_time((s << 8) as u64 + 0b0010_0100) - // } - fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(0b0100_0000) - } - fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(0b1000_0000) - } - fn schedule(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn cancel(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn schedule_named(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn cancel_named(_s: u32) -> Weight { - Weight::from_ref_time(50) - } - fn change_named_priority(_s: u32) -> Weight { - Weight::from_ref_time(50) - } -} -parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * - BlockWeights::get().max_block; -} - -pub struct EnsureSignedOneOrRoot; -impl, O>> + From>> EnsureOrigin - for EnsureSignedOneOrRoot -{ - type Success = ScheduledEnsureOriginSuccess; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Root => Ok(ScheduledEnsureOriginSuccess::Root), - RawOrigin::Signed(1) => Ok(ScheduledEnsureOriginSuccess::Signed(1)), - r => Err(O::from(r)), - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(RawOrigin::Root)) - } -} - -pub struct Executor; -impl DispatchCall for Executor { - fn dispatch_call( - signer: Option, - function: RuntimeCall, - ) -> Result< - Result>, - TransactionValidityError, - > { - let origin = match signer { - Some(who) => RuntimeOrigin::signed(who), - None => RuntimeOrigin::none(), - }; - Ok(function.dispatch(origin)) - } -} - -impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; - type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = EnsureSignedOneOrRoot; - type MaxScheduledPerBlock = ConstU32<10>; - type WeightInfo = TestWeightInfo; - type OriginPrivilegeCmp = EqualPrivilegeOnly; - type Preimages = (); - type PrioritySetOrigin = EnsureRoot; - type CallExecutor = Executor; -} - -pub type LoggerCall = logger::Call; - -pub type SystemCall = frame_system::Call; - -pub fn new_test_ext() -> sp_io::TestExternalities { - let t = system::GenesisConfig::default() - .build_storage::() - .unwrap(); - t.into() -} - -pub fn run_to_block(n: u64) { - while System::block_number() < n { - Scheduler::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - Scheduler::on_initialize(System::block_number()); - } -} - -pub fn root() -> OriginCaller { - system::RawOrigin::Root.into() -} diff --git a/pallets/scheduler-v2/src/tests.rs b/pallets/scheduler-v2/src/tests.rs deleted file mode 100644 index 916a5a991e..0000000000 --- a/pallets/scheduler-v2/src/tests.rs +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. 
- -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -// Original license: -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Scheduler tests. -#![allow(deprecated)] - -use super::*; -use crate::mock::{ - logger, new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *, -}; -use frame_support::{ - assert_noop, assert_ok, - traits::{Contains, OnInitialize}, - assert_err, -}; - -#[test] -fn basic_scheduling_works() { - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - assert!(!::BaseCallFilter::contains( - &call - )); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - )); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn schedule_after_works() { - new_test_ext().execute_with(|| { - run_to_block(2); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - assert!(!::BaseCallFilter::contains( - &call - )); - // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 - assert_ok!(Scheduler::do_schedule( - DispatchTime::After(3), - None, - 127, - root(), - >::new(call).unwrap(), - )); - run_to_block(5); - assert!(logger::log().is_empty()); - run_to_block(6); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn schedule_after_zero_works() { - new_test_ext().execute_with(|| { - run_to_block(2); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - assert!(!::BaseCallFilter::contains( - &call - )); - assert_ok!(Scheduler::do_schedule( - DispatchTime::After(0), - None, - 127, - root(), - >::new(call).unwrap(), - )); - // Will trigger on the next block. - run_to_block(3); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn periodic_scheduling_works() { - new_test_ext().execute_with(|| { - // at #4, every 3 blocks, 3 times. 
- assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - Some((3, 3)), - 127, - root(), - >::new(RuntimeCall::Logger(logger::Call::log { - i: 42, - weight: Weight::from_ref_time(10) - })) - .unwrap() - )); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(6); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(7); - assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(9); - assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(10); - assert_eq!( - logger::log(), - vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)] - ); - run_to_block(100); - assert_eq!( - logger::log(), - vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)] - ); - }); -} - -#[test] -fn cancel_named_scheduling_works_with_normal_cancel() { - new_test_ext().execute_with(|| { - // at #4. - Scheduler::do_schedule_named( - [1u8; 32], - DispatchTime::At(4), - None, - 127, - root(), - >::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - ) - .unwrap(); - let i = Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - ) - .unwrap(); - run_to_block(3); - assert!(logger::log().is_empty()); - assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); - assert_ok!(Scheduler::do_cancel(None, i)); - run_to_block(100); - assert!(logger::log().is_empty()); - }); -} - -#[test] -fn cancel_named_periodic_scheduling_works() { - new_test_ext().execute_with(|| { - // at #4, every 3 blocks, 3 times. - Scheduler::do_schedule_named( - [1u8; 32], - DispatchTime::At(4), - Some((3, 3)), - 127, - root(), - >::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - ) - .unwrap(); - // same id results in error. - assert!(Scheduler::do_schedule_named( - [1u8; 32], - DispatchTime::At(4), - None, - 127, - root(), - >::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10) - })) - .unwrap(), - ) - .is_err()); - // different id is ok. 
- Scheduler::do_schedule_named( - [2u8; 32], - DispatchTime::At(8), - None, - 127, - root(), - >::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })) - .unwrap(), - ) - .unwrap(); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(6); - assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); - }); -} - -#[test] -fn scheduler_respects_weight_limits() { - let max_weight: Weight = ::MaximumWeight::get(); - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: max_weight / 3 * 2, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: max_weight / 3 * 2, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - )); - // 69 and 42 do not fit together - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(5); - assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); - }); -} - -/// Permanently overweight calls are not deleted but also not executed. -#[test] -fn scheduler_does_not_delete_permanently_overweight_call() { - let max_weight: Weight = ::MaximumWeight::get(); - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: max_weight, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - )); - // Never executes. - run_to_block(100); - assert_eq!(logger::log(), vec![]); - - // Assert the `PermanentlyOverweight` event. - assert_eq!( - System::events().last().unwrap().event, - crate::Event::PermanentlyOverweight { - task: (4, 0), - id: None - } - .into(), - ); - // The call is still in the agenda. - assert!(Agenda::::get(4).agenda[0].is_some()); - }); -} - -#[test] -fn scheduler_periodic_tasks_always_find_place() { - let max_weight: Weight = ::MaximumWeight::get(); - let max_per_block = ::MaxScheduledPerBlock::get(); - - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: (max_weight / 3) * 2, - }); - let call = >::new(call).unwrap(); - - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - Some((4, u32::MAX)), - 127, - root(), - call.clone(), - )); - // Executes 5 times till block 20. - run_to_block(20); - assert_eq!(logger::log().len(), 5); - - // Block 28 will already be full. 
- for _ in 0..max_per_block { - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(28), - None, - 120, - root(), - call.clone(), - )); - } - - run_to_block(24); - assert_eq!(logger::log().len(), 6); - - // The periodic task should be postponed - assert_eq!(>::get(29).agenda.len(), 1); - - run_to_block(27); // will call on_initialize(28) - assert_eq!(logger::log().len(), 6); - - run_to_block(28); // will call on_initialize(29) - assert_eq!(logger::log().len(), 7); - }); -} - -#[test] -fn scheduler_respects_priority_ordering() { - let max_weight: Weight = ::MaximumWeight::get(); - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: max_weight / 3, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 1, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: max_weight / 3, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 0, - root(), - >::new(call).unwrap(), - )); - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); - }); -} - -#[test] -fn scheduler_respects_priority_ordering_with_soft_deadlines() { - new_test_ext().execute_with(|| { - let max_weight: Weight = ::MaximumWeight::get(); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: max_weight / 5 * 2, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 255, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: max_weight / 5 * 2, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: max_weight / 5 * 4, - }); - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), - None, - 126, - root(), - >::new(call).unwrap(), - )); - - // 2600 does not fit with 69 or 42, but has higher priority, so will go through - run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 2600u32)]); - // 69 and 42 fit together - run_to_block(5); - assert_eq!( - logger::log(), - vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)] - ); - }); -} - -#[test] -fn on_initialize_weight_is_correct() { - new_test_ext().execute_with(|| { - let call_weight = Weight::from_ref_time(25); - - // Named - let call = RuntimeCall::Logger(LoggerCall::log { - i: 3, - weight: call_weight + Weight::from_ref_time(1), - }); - assert_ok!(Scheduler::do_schedule_named( - [1u8; 32], - DispatchTime::At(3), - None, - 255, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: call_weight + Weight::from_ref_time(2), - }); - // Anon Periodic - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(2), - Some((1000, 3)), - 128, - root(), - >::new(call).unwrap(), - )); - let call = RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: call_weight + Weight::from_ref_time(3), - }); - // Anon - assert_ok!(Scheduler::do_schedule( - DispatchTime::At(2), - None, - 127, - root(), - >::new(call).unwrap(), - )); - // Named Periodic - let call = RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: call_weight + Weight::from_ref_time(4), - }); - assert_ok!(Scheduler::do_schedule_named( - [2u8; 32], - DispatchTime::At(1), - Some((1000, 3)), - 126, - root(), - >::new(call).unwrap(), - )); - - // Will include the named periodic only - assert_eq!( - Scheduler::on_initialize(1), - TestWeightInfo::service_agendas_base() 
- + TestWeightInfo::service_agenda_base(1) - + >::service_task(None, true, true) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(4) - ); - assert_eq!(IncompleteSince::::get(), None); - assert_eq!(logger::log(), vec![(root(), 2600u32)]); - - // Will include anon and anon periodic - assert_eq!( - Scheduler::on_initialize(2), - TestWeightInfo::service_agendas_base() - + TestWeightInfo::service_agenda_base(2) - + >::service_task(None, false, true) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(3) - + >::service_task(None, false, false) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(2) - ); - assert_eq!(IncompleteSince::::get(), None); - assert_eq!( - logger::log(), - vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)] - ); - - // Will include named only - assert_eq!( - Scheduler::on_initialize(3), - TestWeightInfo::service_agendas_base() - + TestWeightInfo::service_agenda_base(1) - + >::service_task(None, true, false) - + TestWeightInfo::execute_dispatch_unsigned() - + call_weight + Weight::from_ref_time(1) - ); - assert_eq!(IncompleteSince::::get(), None); - assert_eq!( - logger::log(), - vec![ - (root(), 2600u32), - (root(), 69u32), - (root(), 42u32), - (root(), 3u32) - ] - ); - - // Will contain none - let actual_weight = Scheduler::on_initialize(4); - assert_eq!( - actual_weight, - TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(0) - ); - }); -} - -#[test] -fn root_calls_works() { - new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - assert_ok!(Scheduler::schedule_named( - RuntimeOrigin::root(), - [1u8; 32], - 4, - None, - Some(127), - call, - )); - assert_ok!(Scheduler::schedule( - RuntimeOrigin::root(), - 4, - None, - Some(127), - call2 - )); - run_to_block(3); - // Scheduled calls are in the agenda. 
- assert_eq!(Agenda::::get(4).agenda.len(), 2); - assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32])); - assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); - // Scheduled calls are made NONE, so should not effect state - run_to_block(100); - assert!(logger::log().is_empty()); - }); -} - -#[test] -fn fails_to_schedule_task_in_the_past() { - new_test_ext().execute_with(|| { - run_to_block(3); - - let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - let call3 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - - assert_noop!( - Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 2, None, Some(127), call1), - Error::::TargetBlockNumberInPast, - ); - - assert_noop!( - Scheduler::schedule(RuntimeOrigin::root(), 2, None, Some(127), call2), - Error::::TargetBlockNumberInPast, - ); - - assert_noop!( - Scheduler::schedule(RuntimeOrigin::root(), 3, None, Some(127), call3), - Error::::TargetBlockNumberInPast, - ); - }); -} - -#[test] -fn should_use_origin() { - new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - assert_ok!(Scheduler::schedule_named( - system::RawOrigin::Signed(1).into(), - [1u8; 32], - 4, - None, - None, - call, - )); - assert_ok!(Scheduler::schedule( - system::RawOrigin::Signed(1).into(), - 4, - None, - None, - call2, - )); - run_to_block(3); - // Scheduled calls are in the agenda. - assert_eq!(Agenda::::get(4).agenda.len(), 2); - assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named( - system::RawOrigin::Signed(1).into(), - [1u8; 32] - )); - assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); - // Scheduled calls are made NONE, so should not effect state - run_to_block(100); - assert!(logger::log().is_empty()); - }); -} - -#[test] -fn should_check_origin() { - new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - })); - assert_noop!( - Scheduler::schedule_named( - system::RawOrigin::Signed(2).into(), - [1u8; 32], - 4, - None, - None, - call - ), - BadOrigin - ); - assert_noop!( - Scheduler::schedule(system::RawOrigin::Signed(2).into(), 4, None, None, call2), - BadOrigin - ); - }); -} - -#[test] -fn should_check_origin_for_cancel() { - new_test_ext().execute_with(|| { - let call = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 69, - weight: Weight::from_ref_time(10), - })); - let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 42, - weight: Weight::from_ref_time(10), - })); - assert_ok!(Scheduler::schedule_named( - system::RawOrigin::Signed(1).into(), - [1u8; 32], - 4, - None, - None, - call, - )); - assert_ok!(Scheduler::schedule( - system::RawOrigin::Signed(1).into(), - 4, - None, - None, - call2, - )); - run_to_block(3); - // Scheduled calls are in the agenda. 
- assert_eq!(Agenda::::get(4).agenda.len(), 2); - assert!(logger::log().is_empty()); - assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), [1u8; 32]), - BadOrigin - ); - assert_noop!( - Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), - BadOrigin - ); - assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), - BadOrigin - ); - assert_noop!( - Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), - BadOrigin - ); - run_to_block(5); - assert_eq!( - logger::log(), - vec![ - (system::RawOrigin::Signed(1).into(), 69u32), - (system::RawOrigin::Signed(1).into(), 42u32) - ] - ); - }); -} - -/// Cancelling a call and then scheduling a second call for the same -/// block results in different addresses. -#[test] -fn schedule_does_not_resuse_addr() { - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - - // Schedule both calls. - let addr_1 = Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call.clone()).unwrap(), - ) - .unwrap(); - // Cancel the call. - assert_ok!(Scheduler::do_cancel(None, addr_1)); - let addr_2 = Scheduler::do_schedule( - DispatchTime::At(4), - None, - 127, - root(), - >::new(call).unwrap(), - ) - .unwrap(); - - // Should not re-use the address. - assert!(addr_1 != addr_2); - }); -} - -#[test] -fn schedule_agenda_overflows() { - let max: u32 = ::MaxScheduledPerBlock::get(); - - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - let call = >::new(call).unwrap(); - - // Schedule the maximal number allowed per block. - for _ in 0..max { - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.clone()).unwrap(); - } - - // One more time and it errors. - assert_noop!( - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call,), - >::AgendaIsExhausted, - ); - - run_to_block(4); - // All scheduled calls are executed. - assert_eq!(logger::log().len() as u32, max); - }); -} - -/// Cancelling and scheduling does not overflow the agenda but fills holes. -#[test] -fn cancel_and_schedule_fills_holes() { - let max: u32 = ::MaxScheduledPerBlock::get(); - assert!( - max > 3, - "This test only makes sense for MaxScheduledPerBlock > 3" - ); - - new_test_ext().execute_with(|| { - let call = RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: Weight::from_ref_time(10), - }); - let call = >::new(call).unwrap(); - let mut addrs = Vec::<_>::default(); - - // Schedule the maximal number allowed per block. - for _ in 0..max { - addrs.push( - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.clone()) - .unwrap(), - ); - } - // Cancel three of them. - for addr in addrs.into_iter().take(3) { - Scheduler::do_cancel(None, addr).unwrap(); - } - // Schedule three new ones. - for i in 0..3 { - let (_block, index) = - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.clone()) - .unwrap(); - assert_eq!(i, index); - } - - run_to_block(4); - // Maximum number of calls are executed. 
- assert_eq!(logger::log().len() as u32, max); - }); -} - -#[test] -fn cannot_schedule_too_big_tasks() { - new_test_ext().execute_with(|| { - let call = Box::new(<::RuntimeCall>::from(SystemCall::remark { - remark: vec![0; EncodedCall::bound() - 4], - })); - - assert_ok!(Scheduler::schedule( - RuntimeOrigin::root(), - 4, - None, - Some(127), - call - )); - - let call = Box::new(<::RuntimeCall>::from(SystemCall::remark { - remark: vec![0; EncodedCall::bound() - 3], - })); - - assert_err!( - Scheduler::schedule(RuntimeOrigin::root(), 4, None, Some(127), call), - >::TooBigScheduledCall - ); - }); -} diff --git a/pallets/scheduler-v2/src/weights.rs b/pallets/scheduler-v2/src/weights.rs deleted file mode 100644 index 1aaa19377d..0000000000 --- a/pallets/scheduler-v2/src/weights.rs +++ /dev/null @@ -1,234 +0,0 @@ -// Template adopted from https://github.com/paritytech/substrate/blob/master/.maintain/frame-weight-template.hbs - -//! Autogenerated weights for pallet_unique_scheduler_v2 -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-28, STEPS: `50`, REPEAT: 80, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 - -// Executed Command: -// target/release/unique-collator -// benchmark -// pallet -// --pallet -// pallet-unique-scheduler-v2 -// --wasm-execution -// compiled -// --extrinsic -// * -// --template -// .maintain/frame-weight-template.hbs -// --steps=50 -// --repeat=80 -// --heap-pages=4096 -// --output=./pallets/scheduler-v2/src/weights.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(clippy::unnecessary_cast)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_unique_scheduler_v2. -pub trait WeightInfo { - fn service_agendas_base() -> Weight; - fn service_agenda_base(s: u32, ) -> Weight; - fn service_task_base() -> Weight; - fn service_task_named() -> Weight; - fn service_task_periodic() -> Weight; - fn execute_dispatch_signed() -> Weight; - fn execute_dispatch_unsigned() -> Weight; - fn schedule(s: u32, ) -> Weight; - fn cancel(s: u32, ) -> Weight; - fn schedule_named(s: u32, ) -> Weight; - fn cancel_named(s: u32, ) -> Weight; - fn change_named_priority(s: u32, ) -> Weight; -} - -/// Weights for pallet_unique_scheduler_v2 using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - // Storage: Scheduler IncompleteSince (r:1 w:1) - fn service_agendas_base() -> Weight { - Weight::from_ref_time(5_253_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Scheduler Agenda (r:1 w:1) - fn service_agenda_base(s: u32, ) -> Weight { - Weight::from_ref_time(3_858_000 as u64) - // Standard Error: 2_617 - .saturating_add(Weight::from_ref_time(579_704 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - fn service_task_base() -> Weight { - Weight::from_ref_time(10_536_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn service_task_named() -> Weight { - Weight::from_ref_time(12_018_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - fn service_task_periodic() -> Weight { - Weight::from_ref_time(10_669_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - } - // Storage: System Account (r:1 w:1) - // Storage: System AllExtrinsicsLen (r:1 w:1) - // Storage: System BlockWeight (r:1 w:1) - // Storage: Configuration WeightToFeeCoefficientOverride (r:1 w:0) - // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(36_083_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(4_386_000 as u64) - } - // Storage: Scheduler Agenda (r:1 w:1) - fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(17_257_000 as u64) - // Standard Error: 2_791 - .saturating_add(Weight::from_ref_time(574_832 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(19_803_000 as u64) - // Standard Error: 1_177 - .saturating_add(Weight::from_ref_time(475_027 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(18_746_000 as u64) - // Standard Error: 2_997 - .saturating_add(Weight::from_ref_time(635_697 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_983_000 as u64) - // Standard Error: 1_850 - .saturating_add(Weight::from_ref_time(518_812 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:0) - // Storage: Scheduler Agenda (r:1 w:1) - fn change_named_priority(s: u32, ) -> Weight { - 
Weight::from_ref_time(21_591_000 as u64) - // Standard Error: 4_187 - .saturating_add(Weight::from_ref_time(531_231 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - // Storage: Scheduler IncompleteSince (r:1 w:1) - fn service_agendas_base() -> Weight { - Weight::from_ref_time(5_253_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Scheduler Agenda (r:1 w:1) - fn service_agenda_base(s: u32, ) -> Weight { - Weight::from_ref_time(3_858_000 as u64) - // Standard Error: 2_617 - .saturating_add(Weight::from_ref_time(579_704 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - fn service_task_base() -> Weight { - Weight::from_ref_time(10_536_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn service_task_named() -> Weight { - Weight::from_ref_time(12_018_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: System LastRuntimeUpgrade (r:1 w:0) - fn service_task_periodic() -> Weight { - Weight::from_ref_time(10_669_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - } - // Storage: System Account (r:1 w:1) - // Storage: System AllExtrinsicsLen (r:1 w:1) - // Storage: System BlockWeight (r:1 w:1) - // Storage: Configuration WeightToFeeCoefficientOverride (r:1 w:0) - // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(36_083_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - } - fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(4_386_000 as u64) - } - // Storage: Scheduler Agenda (r:1 w:1) - fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(17_257_000 as u64) - // Standard Error: 2_791 - .saturating_add(Weight::from_ref_time(574_832 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(19_803_000 as u64) - // Standard Error: 1_177 - .saturating_add(Weight::from_ref_time(475_027 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(18_746_000 as u64) - // Standard Error: 2_997 - .saturating_add(Weight::from_ref_time(635_697 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_983_000 as u64) - // Standard Error: 1_850 - .saturating_add(Weight::from_ref_time(518_812 as 
u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Scheduler Lookup (r:1 w:0) - // Storage: Scheduler Agenda (r:1 w:1) - fn change_named_priority(s: u32, ) -> Weight { - Weight::from_ref_time(21_591_000 as u64) - // Standard Error: 4_187 - .saturating_add(Weight::from_ref_time(531_231 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } -} diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 03ec7e5700..047c081765 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -329,7 +329,6 @@ pallet-scheduler = { workspace = true } pallet-refungible = { workspace = true } pallet-structure = { workspace = true } pallet-unique = { workspace = true } -pallet-unique-scheduler-v2 = { workspace = true } precompile-utils-macro = { workspace = true } scale-info = { workspace = true } up-common = { workspace = true } diff --git a/test-pallets/utils/Cargo.toml b/test-pallets/utils/Cargo.toml index 5c704a403d..83db7b4351 100644 --- a/test-pallets/utils/Cargo.toml +++ b/test-pallets/utils/Cargo.toml @@ -12,8 +12,6 @@ codec = { workspace = true, package = "parity-scale-codec" } frame-support = { workspace = true } frame-system = { workspace = true } scale-info = { workspace = true } -# pallet-unique-scheduler = { path = '../../pallets/scheduler', default-features = false } -pallet-unique-scheduler-v2 = { workspace = true } sp-std = { workspace = true } [features] @@ -22,7 +20,6 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", - "pallet-unique-scheduler-v2/std", "scale-info/std", "sp-std/std", ] diff --git a/test-pallets/utils/src/lib.rs b/test-pallets/utils/src/lib.rs index 7b538d447e..05459931b8 100644 --- a/test-pallets/utils/src/lib.rs +++ b/test-pallets/utils/src/lib.rs @@ -29,7 +29,6 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; use sp_std::vec::Vec; - // use pallet_unique_scheduler_v2::{TaskName, Pallet as SchedulerPallet}; #[pallet::config] pub trait Config: frame_system::Config /*+ pallet_unique_scheduler_v2::Config*/ { From d893d0cab9bc21eb373ee0dffb7832c917ee1ddd Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:06:34 +0200 Subject: [PATCH 090/143] ci: drop zombienet --- .../zombienet-forkless-data.toml | 83 ---- .../zombienet-forkless-nodata.toml | 39 -- README.md | 16 +- tests/src/util/frankenstein.ts | 382 ------------------ 4 files changed, 1 insertion(+), 519 deletions(-) delete mode 100644 .docker/forkless-config/zombienet-forkless-data.toml delete mode 100644 .docker/forkless-config/zombienet-forkless-nodata.toml delete mode 100644 tests/src/util/frankenstein.ts diff --git a/.docker/forkless-config/zombienet-forkless-data.toml b/.docker/forkless-config/zombienet-forkless-data.toml deleted file mode 100644 index 84154e6873..0000000000 --- a/.docker/forkless-config/zombienet-forkless-data.toml +++ /dev/null @@ -1,83 +0,0 @@ -[settings] -provider = "native" - -[relaychain] -default_command = "/polkadot/target/release/polkadot" -default_args = [ "-lparachain::candidate_validation=debug", "-lxcm=trace", ] -chain = "{{ RELAY_CHAIN_TYPE }}-local" - - [[relaychain.nodes]] - name = "relay-alice" - rpc_port = 9844 - p2p_port = 30444 - prometheus_port = 33044 - prometheus = false - # Zombienet does not grant the default balance to nodes created with [[nodes]]. 
- balance = 2000000000000 - - [[relaychain.nodes]] - name = "relay-bob" - rpc_port = 9855 - p2p_port = 30555 - prometheus_port = 33055 - prometheus = false - # Zombienet does not grant the default balance to nodes created with [[nodes]]. - balance = 2000000000000 - - [[relaychain.nodes]] - name = "relay-charlie" - rpc_port = 9866 - p2p_port = 30666 - prometheus_port = 33066 - prometheus = false - # Zombienet does not grant the default balance to nodes created with [[nodes]]. - balance = 2000000000000 - - [[relaychain.nodes]] - name = "relay-dave" - rpc_port = 9877 - p2p_port = 30777 - prometheus_port = 33077 - prometheus = false - # Zombienet does not grant the default balance to nodes created with [[nodes]]. - balance = 2000000000000 - - [[relaychain.nodes]] - name = "relay-eve" - rpc_port = 9888 - p2p_port = 3088 - prometheus_port = 33088 - prometheus = false - # Zombienet does not grant the default balance to nodes created with [[nodes]]. - balance = 2000000000000 - -[[parachains]] -id = 1000 -chain_spec_modifier_commands = [[ - "chainql", - "--tla-code=rawSpec=import '{{'raw'|chainSpec}}'", - "--tla-str=forkFrom={{ REPLICA_FROM }}", - "fork.jsonnet", -]] - - [[parachains.collators]] - name = "alice" - command = "/unique-chain/current/release/unique-collator" - rpc_port = 9944 - p2p_port = 31200 - prometheus_port = 33144 - prometheus = false - args = [ - "-lxcm=trace,parity_ws::handler=debug,jsonrpsee_core=trace,jsonrpsee-core=trace,jsonrpsee_ws_server=debug", - ] - - [[parachains.collators]] - name = "bob" - command = "/unique-chain/current/release/unique-collator" - rpc_port = 9945 - p2p_port = 31201 - prometheus_port = 33155 - prometheus = false - args = [ - "-lxcm=trace,parity_ws::handler=debug,jsonrpsee_core=trace,jsonrpsee-core=trace,jsonrpsee_ws_server=debug", - ] diff --git a/.docker/forkless-config/zombienet-forkless-nodata.toml b/.docker/forkless-config/zombienet-forkless-nodata.toml deleted file mode 100644 index 19dc8cd9f8..0000000000 --- a/.docker/forkless-config/zombienet-forkless-nodata.toml +++ /dev/null @@ -1,39 +0,0 @@ -[settings] -provider = "native" - -[relaychain] -default_command = "/polkadot/target/release/polkadot" -default_args = [ "-lparachain::candidate_validation=debug", "-lxcm=trace", ] -chain = "{{ RELAY_CHAIN_TYPE }}-local" - - [[relaychain.nodes]] - name = "relay-alice" - rpc_port = 9844 - p2p_port = 30444 - - [[relaychain.node_groups]] - name = "relay" - count = 4 - -[[parachains]] -id = 1000 - - [[parachains.collators]] - name = "alice" - command = "/unique-chain/current/release/unique-collator" - rpc_port = 9944 - p2p_port = 31200 - args = [ - "-lxcm=trace,parity_ws::handler=debug,jsonrpsee_core=trace,jsonrpsee-core=trace,jsonrpsee_ws_server=debug", - "--ws-max-connections=1000", - ] - - [[parachains.collators]] - name = "bob" - command = "/unique-chain/current/release/unique-collator" - rpc_port = 9945 - p2p_port = 31201 - args = [ - "-lxcm=trace,parity_ws::handler=debug,jsonrpsee_core=trace,jsonrpsee-core=trace,jsonrpsee_ws_server=debug", - "--ws-max-connections=1000", - ] diff --git a/README.md b/README.md index b0548b5332..ef52b5fe75 100644 --- a/README.md +++ b/README.md @@ -84,13 +84,6 @@ cargo build --features=unique-runtime --release Note: checkout this project and all related projects (see below) in the sibling folders (both under the same folder) -### Polkadot launch utility - -``` -git clone https://github.com/UniqueNetwork/polkadot-launch.git -git checkout unique-network -``` - ### Build relay ``` @@ -118,14 +111,7 @@ make build-release ## 
Running as Parachain locally -``` -./launch-testnet.sh -``` - -Optional, full setup with Acala and Statemint -``` -./launch-testnet-full.sh -``` +TODO ## Run Integration Tests diff --git a/tests/src/util/frankenstein.ts b/tests/src/util/frankenstein.ts deleted file mode 100644 index b078d99293..0000000000 --- a/tests/src/util/frankenstein.ts +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2019-2023 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -import {ApiPromise} from '@polkadot/api'; -import {blake2AsHex, cryptoWaitReady} from '@polkadot/util-crypto'; -import zombie from '@zombienet/orchestrator/dist'; -import {readNetworkConfig} from '@zombienet/utils/dist'; -import {resolve} from 'path'; -import {usingPlaygrounds} from '.'; -import {migrations} from './frankensteinMigrate'; -import fs from 'fs'; - -const ZOMBIENET_CREDENTIALS = process.env.ZOMBIENET_CREDENTIALS || '../.env'; -const NETWORK_CONFIG_FILE = process.argv[2] ?? '../launch-zombienet.toml'; -const PARA_DIR = process.env.PARA_DIR || '../'; -const RELAY_DIR = process.env.RELAY_DIR || '../../polkadot/'; -const REPLICA_FROM = process.env.REPLICA_FROM || 'wss://ws-opal.unique.network:443'; -const NEW_RELAY_BIN = process.env.NEW_RELAY_BIN; -const NEW_RELAY_WASM = process.env.NEW_RELAY_WASM; -const NEW_PARA_BIN = process.env.NEW_PARA_BIN; -const NEW_PARA_WASM = process.env.NEW_PARA_WASM; -const DESTINATION_SPEC_VERSION = process.env.DESTINATION_SPEC_VERSION!; -const PARACHAIN_BLOCK_TIME = 12_000; -const SUPERUSER_KEY = '//Alice'; - -let network: zombie.Network | undefined; - -// Stop the network if it is running -const stop = async () => { - await network?.stop(); -}; - -// Promise of a timeout -function delay(ms: number) { - return new Promise(resolve => setTimeout(resolve, ms)); -} - -// Countdown with time left on display -async function waitWithTimer(time: number) { - const secondsTotal = Math.ceil(time / 1000); - for(let i = secondsTotal; i > 0; i--) { - // could also introduce hours, but wth - const seconds = i % 60; - const text = `Time left: ${Math.floor(i / 60)}:${seconds < 10 ? 
'0' + seconds : seconds}`; - if(process.stdout.isTTY) - process.stdout.write(text); - else if(seconds % 10 == 0) - console.log(text); - await delay(1000); - if(process.stdout.isTTY) { - process.stdout.clearLine(0); - process.stdout.cursorTo(0); - } - } -} - -// Get the runtime's current version -function getSpecVersion(api: ApiPromise): number { - return (api.consts.system.version as any).specVersion.toNumber(); -} - -// Get the required information on the relay chain -function getRelayInfo(api: ApiPromise): {specVersion: number, epochBlockLength: number, blockTime: number, epochTime: number} { - const info = { - specVersion: getSpecVersion(api), - epochBlockLength: (api.consts.babe.epochDuration as any).toNumber(), - blockTime: (api.consts.babe.expectedBlockTime as any).toNumber(), - epochTime: 0, - }; - info.epochTime = info.epochBlockLength * info.blockTime; - return info; -} - -// Enable or disable maintenance mode if present on the chain -async function toggleMaintenanceMode(value: boolean, wsUri: string, retries = 5) { - try { - await usingPlaygrounds(async (helper, privateKey) => { - const superuser = await privateKey(SUPERUSER_KEY); - try { - const toggle = value ? 'enable' : 'disable'; - await helper.getSudo().executeExtrinsic(superuser, `api.tx.maintenance.${toggle}`, []); - console.log(`Maintenance mode ${value ? 'engaged' : 'disengaged'}.`); - } catch (e) { - console.error('Couldn\'t set maintenance mode. The maintenance pallet probably does not exist. Log:', e); - } - }, wsUri); - } catch (error) { - console.error(error); - console.log('Trying for retry toggle maintanence mode'); - await delay(12_000); - await toggleMaintenanceMode(value, wsUri, retries - 1); - } -} - -async function skipIfAlreadyUpgraded() { - await usingPlaygrounds(async (helper) => { - const specVersion = await getSpecVersion(helper.getApi()); - if(`v${specVersion}` === DESTINATION_SPEC_VERSION) { - console.log('\n🛸 Current version equal DESTINATION_SPEC_VERSION 🛸'); - console.log("\n🛸 PARACHAINS' RUNTIME UPGRADE TESTING COMPLETE 🛸"); - } - }, REPLICA_FROM); -} - -const raiseZombienet = async (): Promise => { - await skipIfAlreadyUpgraded(); - const isUpgradeTesting = !!NEW_RELAY_BIN || !!NEW_RELAY_WASM || !!NEW_PARA_BIN || !!NEW_PARA_WASM; - /* - // If there is nothing to upgrade, what is the point - if (!isUpgradeTesting) { - console.warn('\nNeither the relay nor the parachain were selected for an upgrade! ' + - 'Please pass environment vars `NEW_RELAY_BIN`, `NEW_RELAY_WASM`, `NEW_PARA_BIN`, `NEW_PARA_WASM`.'); - process.exit(1); - } - */ - - // an unsavory practice, but very convenient, mwahahah - process.env.PARA_DIR = PARA_DIR; - process.env.RELAY_DIR = RELAY_DIR; - process.env.REPLICA_FROM = REPLICA_FROM; - - const configPath = resolve(NETWORK_CONFIG_FILE); - const networkConfig = readNetworkConfig(configPath); - // console.log(networkConfig); - if(networkConfig.settings.provider !== 'native') { - throw new Error(`Oh no! Expected native network, got ${networkConfig.settings.provider}.`); - } - - await cryptoWaitReady(); - - // Launch Zombienet! - network = await zombie.start(ZOMBIENET_CREDENTIALS, networkConfig, {silent: false}); - - // Get the relay chain info like the epoch length and spec version - // Then restart each parachain's binaries - // // Stop and restart each node - // // Send specified keys to parachain nodes in case the parachain requires it - // If it is not needed to upgrade runtimes themselves, the job is done! 
- - // Record some required information regarding the relay chain - await network.relay[0].connectApi(); - let relayInfo = getRelayInfo((network.relay[0] as any).apiInstance!); - await network.relay[0].apiInstance!.disconnect(); - if(isUpgradeTesting) { - console.log('Relay stats:', relayInfo); - } - - // non-exported functionality of NativeClient - const networkClient = (network.client as any); - - if(NEW_RELAY_BIN) { - console.log('\n🧶 Restarting relay nodes'); - - for(const [index, node] of network.relay.entries()) { - await node.apiInstance?.disconnect(); - - console.log(`\n🚦 Starting timeout for the epoch change (node ${index + 1}/${network.relay.length})...`); - await waitWithTimer(relayInfo.epochTime); - - // Replace the node-starting command with the new binary - const cmd = networkClient.processMap[node.name].cmd[0].split(' ')[0]; - networkClient.processMap[node.name].cmd = networkClient.processMap[node.name].cmd.map((arg: string) => arg.replace(cmd, NEW_RELAY_BIN)); - - await node.restart(); - } - - console.log('\n🌒 All relay nodes restarted with the new binaries.'); - } - - if(NEW_PARA_BIN) { - for(const paraId in network.paras) { - const para = network.paras[paraId]; - console.log(`\n🧶 Restarting collator nodes of parachain ${paraId}`); - - for(const [_index, node] of para.nodes.entries()) { - await node.apiInstance?.disconnect(); - - // Replace the node-starting command with the new binary - const cmd = networkClient.processMap[node.name].cmd[0].split(' ')[0]; - networkClient.processMap[node.name].cmd = networkClient.processMap[node.name].cmd.map((arg: string) => arg.replace(cmd, NEW_PARA_BIN)); - - await node.restart(); - // applyaurakey? - // Zombienet handles it on first-time node creation - } - } - - console.log('\n🌗 All parachain collators restarted with the new binaries.'); - } - - // Re-establish connection to the relay node and get the runtime upgrade validation delay for parachains - // For the relay, connect and set the new runtime code - // For each parachain, connect, authorize and upgrade its runtime - // Ping the the chains for the runtime upgrade after the minimal time and then every few blocks - // // For each parachain, re-connect and verify that the runtime upgrade is successful - - let relayUpgradeCompleted = false, paraUpgradeCompleted = false; - - if(NEW_RELAY_WASM) { - const relayOldVersion = relayInfo.specVersion; - console.log('\n🚦 Starting timeout for the next epoch before upgrading the relay runtime code...'); - await waitWithTimer(relayInfo.epochTime); - - console.log('--- Upgrading the relay chain runtime \t---'); - - // Read the new WASM code and set it as the relay's new code - const code = fs.readFileSync(NEW_RELAY_WASM).toString('hex'); - await usingPlaygrounds(async (helper, privateKey) => { - const superuser = await privateKey(SUPERUSER_KEY); - - const result = await helper.executeExtrinsic( - superuser, - 'api.tx.sudo.sudoUncheckedWeight', - [helper.constructApiCall('api.tx.system.setCode', [`0x${code}`]), {}], - ); - - if(result.status == 'Fail') { - console.error('Failed to upgrade the runtime:', result); - } - - // Get the updated information from the relay's new runtime - relayInfo = getRelayInfo(helper.getApi()); - }, network.relay[0].wsUri); - - if(relayOldVersion != relayInfo.specVersion) { - // eslint-disable-next-line no-useless-escape - console.log(`\n\🛰️ The relay has successfully upgraded from version ${relayOldVersion} to ${relayInfo.specVersion}!`); - relayUpgradeCompleted = true; - } else { - console.error(`\nThe relay did not 
upgrade from version ${relayOldVersion}!`); - } - } else { - // If the relay did not need to be upgraded, it's already technically completed - relayUpgradeCompleted = true; - } - - if(NEW_PARA_WASM) { - let codeValidationDelayBlocks = 0; - const upgradingParas: {[id: string]: {version: number, upgraded: boolean}} = {}; - // Calculate the code validation delay of the relay chain, - // so that we know how much to wait before the parachains can be upgraded after the extrinsic - await usingPlaygrounds(async (helper) => { - const {validationUpgradeDelay, minimumValidationUpgradeDelay} = - (await helper.callRpc('api.query.configuration.activeConfig', [])).toJSON() as any; - - codeValidationDelayBlocks = Math.max(validationUpgradeDelay ?? 0, minimumValidationUpgradeDelay ?? 0); - }, network.relay[0].wsUri); - - // Wait for the next epoch so that the parachains will start cooperating with the relay - if(relayUpgradeCompleted && NEW_RELAY_WASM) { - console.log('\n🚥 Starting timeout for the next epoch before upgrading the parachains code...'); - await waitWithTimer(relayInfo.epochTime); - } - - const migration = migrations[DESTINATION_SPEC_VERSION]; - console.log('⭐️⭐️⭐️ DESTINATION_SPEC_VERSION ⭐️⭐️⭐️', DESTINATION_SPEC_VERSION); - for(const paraId in network.paras) { - console.log(`\n--- Upgrading the runtime of parachain ${paraId} \t---`); - const para = network.paras[paraId]; - - // Enable maintenance mode if present - await toggleMaintenanceMode(true, para.nodes[0].wsUri); - if(migration) { - console.log('⭐️⭐️⭐️ Running pre-upgrade scripts... ⭐️⭐️⭐️'); - await migration.before(); - } - - // Read the WASM code and authorize the upgrade with its hash and set it as the new runtime - const code = fs.readFileSync(NEW_PARA_WASM); - const codeHash = blake2AsHex(code); - await usingPlaygrounds(async (helper, privateKey) => { - const superuser = await privateKey(SUPERUSER_KEY); - - upgradingParas[paraId] = {version: getSpecVersion(helper.getApi()), upgraded: false}; - - console.log('--- Authorizing the parachain runtime upgrade \t---'); - let result = await helper.executeExtrinsic( - superuser, - 'api.tx.sudo.sudoUncheckedWeight', - [helper.constructApiCall('api.tx.parachainSystem.authorizeUpgrade', [codeHash, false]), {}], - ); - - if(result.status == 'Fail') { - console.error('Failed to authorize the upgrade:', result); - return; - } - - console.log('--- Enacting the parachain runtime upgrade \t---'); - result = await helper.executeExtrinsic( - superuser, - 'api.tx.sudo.sudoUncheckedWeight', - [helper.constructApiCall('api.tx.parachainSystem.enactAuthorizedUpgrade', [`0x${code.toString('hex')}`]), {}], - ); - - if(result.status == 'Fail') { - console.error('Failed to upgrade the runtime:', result); - } - }, para.nodes[0].wsUri); - } - - // Check the upgrades of the parachains, first after the minimum code validation delay, and then after some block time increments - let firstPass = true; - for(let attempt = 0; attempt < 3 && !paraUpgradeCompleted; attempt++) { - if(firstPass) { - console.log('\nCode validation delay:', codeValidationDelayBlocks, 'blocks'); - console.log('🚥 Waiting for the minimum code validation delay before the parachain can upgrade...'); - await waitWithTimer(relayInfo.blockTime * codeValidationDelayBlocks); - firstPass = false; - } else { - console.log('\n🚥 Waiting for a few blocks more to verify that the parachain upgrades are successful...'); - await waitWithTimer(PARACHAIN_BLOCK_TIME * 3); - } - - // Ping the parachains' nodes for new runtime versions - let upgradeFailed = 
false; - for(const paraId in network.paras) { - if(upgradingParas[paraId].upgraded) continue; - - const para = network.paras[paraId]; - // eslint-disable-next-line require-await - await usingPlaygrounds(async (helper) => { - const specVersion = getSpecVersion(helper.getApi()); - - if(specVersion != upgradingParas[paraId].version) { - // eslint-disable-next-line no-useless-escape - console.log(`\n\🛰️ Parachain ${paraId} has successfully upgraded from version ${upgradingParas[paraId].version} to ${specVersion}!`); - upgradingParas[paraId].upgraded = true; - } else { - console.error(`\nParachain ${paraId} failed to upgrade from version ${upgradingParas[paraId].version}!`); - upgradeFailed = true; - } - }, para.nodes[0].wsUri); - - paraUpgradeCompleted = !upgradeFailed; - } - } - - // Disable maintenance mode if present - for(const paraId in network.paras) { - // TODO only if our parachain - if(migration) { - console.log('⭐️⭐️⭐️ Running post-upgrade scripts... ⭐️⭐️⭐️'); - await migration.after(); - } - await toggleMaintenanceMode(false, network.paras[paraId].nodes[0].wsUri); - } - } else { - // If the relay did not need to be upgraded, it's already technically completed - paraUpgradeCompleted = true; - } - - // await network.stop(); - - if(isUpgradeTesting) { - if(paraUpgradeCompleted && relayUpgradeCompleted) { - console.log("\n🛸 PARACHAINS' RUNTIME UPGRADE TESTING COMPLETE 🛸"); - } else { - console.error("\n🚧 PARACHAINS' RUNTIME UPGRADE TESTING FAILED 🚧"); - } - } else { - console.log('🚀 ZOMBIENET RAISED 🚀'); - } -}; - -raiseZombienet()/*.then(async () => await stop())*/.catch(async (e) => { - console.error(e); - await stop(); - process.exit(1); -}); From 51232b085923683b5fea7ca023dd13d8fa80d0c9 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:06:43 +0200 Subject: [PATCH 091/143] build: upgrade dependencies to polkadot v1.1.0 --- Cargo.toml | 259 +++++++++--------- client/rpc/Cargo.toml | 18 +- crates/struct-versioning/Cargo.toml | 2 +- node/cli/Cargo.toml | 62 +++-- node/rpc/Cargo.toml | 4 +- pallets/app-promotion/Cargo.toml | 8 +- pallets/balances-adapter/Cargo.toml | 2 +- pallets/collator-selection/Cargo.toml | 8 +- pallets/common/Cargo.toml | 8 +- pallets/configuration/Cargo.toml | 10 +- pallets/evm-coder-substrate/Cargo.toml | 7 +- pallets/evm-contract-helpers/Cargo.toml | 4 +- pallets/evm-migration/Cargo.toml | 16 +- pallets/evm-transaction-payment/Cargo.toml | 4 +- pallets/foreign-assets/Cargo.toml | 15 +- pallets/fungible/Cargo.toml | 4 +- pallets/gov-origins/Cargo.toml | 2 - pallets/identity/Cargo.toml | 9 +- pallets/inflation/Cargo.toml | 6 +- pallets/maintenance/Cargo.toml | 16 +- pallets/nonfungible/Cargo.toml | 4 +- pallets/refungible/Cargo.toml | 4 +- pallets/structure/Cargo.toml | 8 +- pallets/unique/Cargo.toml | 5 +- primitives/app_promotion_rpc/Cargo.toml | 4 +- primitives/common/Cargo.toml | 2 + primitives/data-structs/Cargo.toml | 12 +- primitives/pov-estimate-rpc/Cargo.toml | 12 +- primitives/rpc/Cargo.toml | 4 +- .../precompiles/utils/macro/Cargo.toml | 2 +- runtime/opal/Cargo.toml | 71 +++-- runtime/quartz/Cargo.toml | 74 +++-- runtime/tests/Cargo.toml | 4 +- runtime/unique/Cargo.toml | 72 +++-- test-pallets/utils/Cargo.toml | 10 +- 35 files changed, 387 insertions(+), 365 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 898aa16eea..deea71cf01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] default-members = ['client/*', 'node/*', 'runtime/opal'] +exclude = ['pallets/scheduler-v2'] members = [ 'client/*', 'crates/*', 
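The hunks below apply one mechanical pattern across the whole `[workspace.dependencies]` table: every Parity git dependency previously split across the `substrate`, `polkadot`, and `cumulus` repositories on the `polkadot-v0.9.43` / `release-v0.9.43` branches is repointed at the consolidated `polkadot-sdk` repository on the `release-polkadot-v1.1.0` branch. As an illustrative before/after for a single entry (a summary of the pattern that follows, not an additional change in this patch):

```toml
# Before: three separate Parity repositories, each pinned to its own 0.9.43 branch
frame-support = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" }

# After: Substrate, Polkadot and Cumulus crates all come from the merged polkadot-sdk repository
frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
```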
@@ -31,12 +32,10 @@ debug-assertions = true [workspace.dependencies] # Unique app-promotion-rpc = { path = "primitives/app_promotion_rpc", default-features = false } -evm-coder = { version = "0.4.2", default-features = false, features = [ - 'bondrewd', -] } +evm-coder = { version = "0.4.2", default-features = false, features = ['bondrewd'] } pallet-app-promotion = { path = "pallets/app-promotion", default-features = false } pallet-balances-adapter = { default-features = false, path = "pallets/balances-adapter" } -pallet-charge-transaction = { package = "pallet-template-transaction-payment", default-features = false, git = "https://github.com/uniquenetwork/pallet-sponsoring", branch = "polkadot-v0.9.43" } +pallet-charge-transaction = { package = "pallet-template-transaction-payment", default-features = false, git = "https://github.com/uniquenetwork/pallet-sponsoring", branch = "polkadot-v1.1.0" } pallet-collator-selection = { default-features = false, path = "pallets/collator-selection" } pallet-common = { default-features = false, path = "pallets/common" } pallet-configuration = { default-features = false, path = "pallets/configuration" } @@ -55,7 +54,7 @@ pallet-refungible = { default-features = false, path = "pallets/refungible" } pallet-structure = { default-features = false, path = "pallets/structure" } pallet-test-utils = { default-features = false, path = "test-pallets/utils" } pallet-unique = { path = "pallets/unique", default-features = false } -pallet-unique-scheduler-v2 = { path = "pallets/scheduler-v2", default-features = false } +# pallet-unique-scheduler-v2 = { path = "pallets/scheduler-v2", default-features = false } precompile-utils-macro = { path = "runtime/common/ethereum/precompiles/utils/macro" } struct-versioning = { path = "crates/struct-versioning" } uc-rpc = { path = "client/rpc" } @@ -64,7 +63,7 @@ up-common = { path = "primitives/common", default-features = false } up-data-structs = { path = "primitives/data-structs", default-features = false } up-pov-estimate-rpc = { path = "primitives/pov-estimate-rpc", default-features = false } up-rpc = { path = "primitives/rpc", default-features = false } -up-sponsorship = { default-features = false, git = "https://github.com/uniquenetwork/pallet-sponsoring", branch = "polkadot-v0.9.43" } +up-sponsorship = { default-features = false, git = "https://github.com/uniquenetwork/pallet-sponsoring", branch = "polkadot-v1.1.0" } # Unique: Runtimes opal-runtime = { path = "runtime/opal" } @@ -72,132 +71,138 @@ quartz-runtime = { path = "runtime/quartz" } unique-runtime = { path = "runtime/unique" } # Frontier (Unique patches over the Parity version) -fc-consensus = { git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fc-db = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fc-mapping-sync = { git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fc-rpc = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fc-rpc-core = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fp-evm = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fp-rpc = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } 
-fp-self-contained = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -fp-storage = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -pallet-base-fee = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -pallet-ethereum = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -pallet-evm = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } -pallet-evm-precompile-simple = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v0.9.43" } +fc-api = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fc-consensus = { git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fc-db = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fc-mapping-sync = { git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fc-rpc = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fc-rpc-core = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fp-evm = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fp-rpc = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fp-self-contained = { default-features = false, features = [ + "serde", +], git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +fp-storage = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +pallet-base-fee = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +pallet-ethereum = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +pallet-evm = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } +pallet-evm-precompile-simple = { default-features = false, git = "https://github.com/uniquenetwork/unique-frontier", branch = "unique-polkadot-v1.1.0" } # Parity -codec = { default-features = false, features = ['derive'], package = 'parity-scale-codec', version = "3.2.2" } -cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-pallet-aura-ext = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } 
-cumulus-pallet-dmp-queue = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-pallet-parachain-system = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-pallet-xcm = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-pallet-xcmp-queue = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-primitives-core = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-primitives-timestamp = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-primitives-utility = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-relay-chain-interface = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -frame-executive = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -frame-support = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -frame-system = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -frame-system-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-aura = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-authorship = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-balances = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-collective = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-democracy = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-membership = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-preimage = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-ranked-collective = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-referenda = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-scheduler = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-session = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-state-trie-migration = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-sudo = { default-features = false, git = "https://github.com/paritytech/substrate", branch 
= "polkadot-v0.9.43" } -pallet-timestamp = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-utility = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-transaction-payment = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-transaction-payment-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-treasury = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43", default-features = false } -parachain-info = { default-features = false, git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.43" } -parity-scale-codec = { version = "3.2.2", features = ["derive"], default-features = false } -polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43", default-features = false } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } -polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-network-sync = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -sp-api = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-arithmetic = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-block-builder = { default-features = false, git = "https://github.com/paritytech/substrate", branch 
= "polkadot-v0.9.43" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-consensus-aura = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-core = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-inherents = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-io = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-offchain = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-runtime = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-session = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-staking = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-std = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-transaction-pool = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-trie = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -sp-version = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.43", default-features = false } +codec = { default-features = false, features = ['derive'], package = 'parity-scale-codec', version = "3.6.5" } +cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-collator = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-proposer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-service = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-pallet-aura-ext = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-pallet-dmp-queue = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-pallet-parachain-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-pallet-xcm = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-pallet-xcmp-queue = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-parachain-inherent = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-timestamp = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-utility = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-executive = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-system-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-aura = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-authorship = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-balances = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-collective = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-democracy = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-membership = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-preimage = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-ranked-collective = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-referenda = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = 
"release-polkadot-v1.1.0" } +pallet-scheduler = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-session = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-state-trie-migration = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-sudo = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-timestamp = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-transaction-payment = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-transaction-payment-rpc-runtime-api = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-treasury = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-utility = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +parachain-info = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +parity-scale-codec = { version = "3.6.5", features = ["derive"], default-features = false } +polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +polkadot-parachain-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +polkadot-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } 
+sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +sp-api = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-arithmetic = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-block-builder = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-consensus-aura = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-externalities = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-inherents = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-io = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-offchain = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-session = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-staking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-state-machine = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-transaction-pool = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-trie = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-version = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-weights = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +staging-xcm = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +staging-xcm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } 
+staging-xcm-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } # Parity: Build utils -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } # Parity: Benchmarking -frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -frame-system-benchmarking = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-system-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } # Parity: Try Runtime -frame-try-runtime = { default-features = false, git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } -try-runtime-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +frame-try-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +try-runtime-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } # ORML -orml-tokens = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v0.9.43" } -orml-traits = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v0.9.43" } -orml-vesting = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v0.9.43" } -orml-xcm-support = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v0.9.43" } -orml-xtokens = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v0.9.43" } +orml-tokens = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" } +orml-traits = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" } +orml-vesting = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" } +orml-xcm-support = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" } 
+orml-xtokens = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" } # Other derivative = { version = "2.2.0", features = ["use_core"] } @@ -205,8 +210,8 @@ ethereum = { version = "0.14.0", default-features = false } evm-core = { git = "https://github.com/rust-blockchain/evm", rev = "b7b82c7e1fc57b7449d6dfa6826600de37cc1e65", default-features = false } hex-literal = "0.4.1" impl-trait-for-tuples = "0.2.2" -jsonrpsee = { version = "0.16.2", features = ["macros", "server"] } -log = { version = "0.4.16", default-features = false } -num_enum = { version = "0.5.3", default-features = false } -serde = { default-features = false, features = ['derive'], version = "1.0.136" } -smallvec = "1.6.1" +jsonrpsee = { version = "0.16.3", features = ["macros", "server"] } +log = { version = "0.4.20", default-features = false } +num_enum = { version = "0.7.0", default-features = false } +serde = { default-features = false, features = ['derive'], version = "1.0.188" } +smallvec = "1.11.1" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index bd02b720fc..bd1ce62919 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -5,13 +5,11 @@ name = "uc-rpc" version = "0.1.4" [dependencies] -anyhow = "1.0.57" -jsonrpsee = { version = "0.16.2", features = ["macros", "server"] } -trie-db = { version = "0.24.0", default-features = false } -zstd = { version = "0.11.2", default-features = false } - -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } +anyhow = "1.0.75" +jsonrpsee = { version = "0.16.3", features = ["macros", "server"] } +parity-scale-codec = { workspace = true } +trie-db = { version = "0.27.1", default-features = false } +zstd = { version = "0.12.4", default-features = false } app-promotion-rpc = { workspace = true } up-common = { workspace = true } @@ -44,12 +42,8 @@ unique-runtime = { workspace = true, optional = true } hex-literal = { workspace = true } [features] +all-runtimes = ['opal-runtime', 'quartz-runtime', 'unique-runtime'] default = ['opal-runtime'] -all-runtimes = [ - 'opal-runtime', - 'quartz-runtime', - 'unique-runtime', -] pov-estimate = [ 'opal-runtime/pov-estimate', 'quartz-runtime?/pov-estimate', diff --git a/crates/struct-versioning/Cargo.toml b/crates/struct-versioning/Cargo.toml index fe46463fe0..21c382c2b5 100644 --- a/crates/struct-versioning/Cargo.toml +++ b/crates/struct-versioning/Cargo.toml @@ -4,7 +4,7 @@ name = "struct-versioning" version = "0.1.0" [dependencies] -quote = "1.0.15" +quote = "1.0.33" syn = { version = "1.0", features = ["full"] } [lib] diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index eadf809978..64de1b786c 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -20,23 +20,24 @@ path = "src/main.rs" targets = ['x86_64-unknown-linux-gnu'] [dependencies] -clap = "4.1" +clap = "4.4" futures = '0.3.28' -tokio = { version = "1.24", features = ["time"] } serde_json = "1.0" +tokio = { version = "1.32", features = ["time"] } log = { workspace = true } -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } cumulus-client-cli = { workspace = true } +cumulus-client-collator = { workspace = true } cumulus-client-consensus-aura = { workspace = true } cumulus-client-consensus-common = { workspace = true } +cumulus-client-consensus-proposer = { workspace = true } cumulus-client-network = { workspace = true } cumulus-client-service = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-primitives-parachain-inherent = { features = ["std"], workspace = true } cumulus-relay-chain-inprocess-interface = { workspace = true } cumulus-relay-chain-interface = { workspace = true } cumulus-relay-chain-minimal-node = { workspace = true } @@ -85,28 +86,57 @@ up-data-structs = { workspace = true } fc-consensus = { workspace = true } fc-db = { workspace = true } fc-mapping-sync = { workspace = true } -fc-rpc = { workspace = true } fc-rpc-core = { workspace = true } +fc-rpc.workspace = true fp-rpc = { workspace = true } app-promotion-rpc = { workspace = true } +fc-api.workspace = true +fp-storage.workspace = true +jsonrpsee.workspace = true +pallet-transaction-payment-rpc.workspace = true +sc-rpc-api.workspace = true +sc-rpc.workspace = true +sp-inherents.workspace = true uc-rpc = { workspace = true } -unique-rpc = { workspace = true } up-pov-estimate-rpc = { workspace = true } up-rpc = { workspace = true } -jsonrpsee.workspace = true -fp-storage.workspace = true -sc-rpc.workspace = true [build-dependencies] substrate-build-script-utils = { workspace = true } [features] -default = ["opal-runtime"] all-runtimes = ['opal-runtime', 'quartz-runtime', 'unique-runtime'] -pov-estimate = ['opal-runtime/pov-estimate', 'quartz-runtime?/pov-estimate', 'uc-rpc/pov-estimate', 'unique-rpc/pov-estimate', 'unique-runtime?/pov-estimate'] -runtime-benchmarks = ['opal-runtime/runtime-benchmarks', 'polkadot-cli/runtime-benchmarks', 'polkadot-service/runtime-benchmarks', 'quartz-runtime?/runtime-benchmarks', 'sc-service/runtime-benchmarks', 'unique-runtime?/runtime-benchmarks'] +default = ["opal-runtime"] +gov-test-timings = [ + 'opal-runtime/gov-test-timings', + 'quartz-runtime?/gov-test-timings', + 'unique-runtime?/gov-test-timings', +] +lookahead = [] +pov-estimate = [ + 'opal-runtime/pov-estimate', + 'quartz-runtime?/pov-estimate', + 'uc-rpc/pov-estimate', + 'unique-runtime?/pov-estimate', +] +runtime-benchmarks = [ + 'opal-runtime/runtime-benchmarks', + 'polkadot-cli/runtime-benchmarks', + 'polkadot-service/runtime-benchmarks', + 'quartz-runtime?/runtime-benchmarks', + 'sc-service/runtime-benchmarks', + 'unique-runtime?/runtime-benchmarks', +] sapphire-runtime = ['quartz-runtime', 'quartz-runtime/become-sapphire'] -try-runtime = ['opal-runtime?/try-runtime', 'quartz-runtime?/try-runtime', 'try-runtime-cli/try-runtime', 'unique-runtime?/try-runtime'] -session-test-timings = ['opal-runtime/session-test-timings', 'quartz-runtime?/session-test-timings', 'unique-runtime?/session-test-timings'] -gov-test-timings = ['opal-runtime/gov-test-timings', 'quartz-runtime?/gov-test-timings', 'unique-runtime?/gov-test-timings'] +session-test-timings = [ + 'opal-runtime/session-test-timings', + 'quartz-runtime?/session-test-timings', + 'unique-runtime?/session-test-timings', +] +try-runtime = [ + 'opal-runtime?/try-runtime', + 'quartz-runtime?/try-runtime', + 'try-runtime-cli/try-runtime', + 'unique-runtime?/try-runtime', +] diff --git a/node/rpc/Cargo.toml 
b/node/rpc/Cargo.toml index 884c5a5b6e..02cf00e03e 100644 --- a/node/rpc/Cargo.toml +++ b/node/rpc/Cargo.toml @@ -27,20 +27,20 @@ sp-runtime = { workspace = true } substrate-frame-rpc-system = { workspace = true } fc-db = { workspace = true } +fc-mapping-sync = { workspace = true } fc-rpc = { workspace = true } fc-rpc-core = { workspace = true } -fc-mapping-sync = { workspace = true } fp-rpc = { workspace = true } fp-storage = { workspace = true } app-promotion-rpc = { workspace = true } +pallet-ethereum.workspace = true serde = { workspace = true } uc-rpc = { workspace = true } up-common = { workspace = true } up-data-structs = { workspace = true } up-pov-estimate-rpc = { workspace = true, default-features = true } up-rpc = { workspace = true } -pallet-ethereum.workspace = true [features] default = [] diff --git a/pallets/app-promotion/Cargo.toml b/pallets/app-promotion/Cargo.toml index 49717137bc..263026465a 100644 --- a/pallets/app-promotion/Cargo.toml +++ b/pallets/app-promotion/Cargo.toml @@ -23,11 +23,11 @@ runtime-benchmarks = [ # 'pallet-unique/runtime-benchmarks', ] std = [ - 'codec/std', 'frame-benchmarking/std', 'frame-support/std', 'frame-system/std', 'pallet-evm/std', + 'parity-scale-codec/std', 'sp-core/std', 'sp-runtime/std', 'sp-std/std', @@ -39,9 +39,7 @@ try-runtime = ["frame-support/try-runtime"] ################################################################################ # Substrate Dependencies -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } frame-benchmarking = { workspace = true, optional = true } @@ -68,4 +66,4 @@ up-data-structs = { workspace = true } ################################################################################ # Other -log = { version = "0.4.16", default-features = false } +log = { version = "0.4.20", default-features = false } diff --git a/pallets/balances-adapter/Cargo.toml b/pallets/balances-adapter/Cargo.toml index c916c0eb90..91e6c3dea2 100644 --- a/pallets/balances-adapter/Cargo.toml +++ b/pallets/balances-adapter/Cargo.toml @@ -15,7 +15,7 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } #Parity -codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } # Unique diff --git a/pallets/collator-selection/Cargo.toml b/pallets/collator-selection/Cargo.toml index 7815141673..b3c33f5ba8 100644 --- a/pallets/collator-selection/Cargo.toml +++ b/pallets/collator-selection/Cargo.toml @@ -12,11 +12,9 @@ version = "5.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - log = { workspace = true } -rand = { version = "0.8.5", features = ["std_rng"], default-features = false } +parity-scale-codec = { workspace = true } +rand = { version = "0.8.5", default-features = false } scale-info = { workspace = true } serde = { workspace = true } @@ -48,13 +46,13 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", ] std = [ - "codec/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", "pallet-authorship/std", "pallet-session/std", + "parity-scale-codec/std", "rand/std", "scale-info/std", "sp-consensus-aura/std", diff --git a/pallets/common/Cargo.toml b/pallets/common/Cargo.toml index 05c7d44679..4907700c58 100644 --- a/pallets/common/Cargo.toml +++ b/pallets/common/Cargo.toml @@ -5,9 +5,7 @@ name = "pallet-common" version = "0.1.14" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } bondrewd = { version = "0.1.14", features = ["derive"], default-features = false } @@ -21,6 +19,7 @@ pallet-evm-coder-substrate = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-weights = { workspace = true } up-data-structs = { workspace = true } up-pov-estimate-rpc = { workspace = true } @@ -28,14 +27,15 @@ up-pov-estimate-rpc = { workspace = true } default = ["std"] runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "up-data-structs/runtime-benchmarks"] std = [ + "evm-coder/std", "frame-support/std", "frame-system/std", "pallet-evm/std", "sp-runtime/std", "sp-std/std", + "sp-weights/std", "up-data-structs/std", "up-pov-estimate-rpc/std", - "evm-coder/std", ] stubgen = ["evm-coder/stubgen", "up-data-structs/stubgen"] tests = [] diff --git a/pallets/configuration/Cargo.toml b/pallets/configuration/Cargo.toml index 0648c2716a..69044c94d7 100644 --- a/pallets/configuration/Cargo.toml +++ b/pallets/configuration/Cargo.toml @@ -4,8 +4,7 @@ name = "pallet-configuration" version = "0.2.0" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } fp-evm = { workspace = true } frame-benchmarking = { workspace = true, optional = true } @@ -16,23 +15,26 @@ smallvec = { workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } +sp-runtime = { workspace = true } sp-std = { workspace = true } +staging-xcm = { workspace = true } up-common = { workspace = true } -xcm = { workspace = true } hex-literal = { workspace = true } +log = { workspace = true } [features] default = ["std"] runtime-benchmarks = ["frame-benchmarking"] std = [ - "codec/std", "fp-evm/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "parity-scale-codec/std", "sp-arithmetic/std", "sp-core/std", + "sp-runtime/std", "sp-std/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/evm-coder-substrate/Cargo.toml b/pallets/evm-coder-substrate/Cargo.toml index b4848a27c3..efef580d67 100644 --- a/pallets/evm-coder-substrate/Cargo.toml +++ b/pallets/evm-coder-substrate/Cargo.toml @@ -5,8 +5,7 @@ name = "pallet-evm-coder-substrate" version = "0.1.3" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } evm-coder = { workspace = true } @@ -18,10 +17,11 @@ frame-system = { workspace = true } pallet-evm = { workspace = true } sp-core = { workspace = true } sp-std = { workspace = true } +sp-weights = { workspace = true } up-data-structs = { workspace = true } evm-coder-substrate-procedural = { path = "./procedural" } -spez = "0.1.1" +spez = "0.1.2" [features] default = ["std"] @@ -33,6 +33,7 @@ std = [ "pallet-evm/std", "sp-core/std", "sp-std/std", + "sp-weights/std", 'frame-benchmarking/std', ] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/evm-contract-helpers/Cargo.toml b/pallets/evm-contract-helpers/Cargo.toml index 1441a3a321..987f4aaa30 100644 --- a/pallets/evm-contract-helpers/Cargo.toml +++ b/pallets/evm-contract-helpers/Cargo.toml @@ -18,13 +18,11 @@ pallet-evm = { workspace = true } up-sponsorship = { workspace = true } # Locals -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - evm-coder = { workspace = true } pallet-common = { workspace = true } pallet-evm-coder-substrate = { workspace = true } pallet-evm-transaction-payment = { workspace = true } +parity-scale-codec = { workspace = true } up-data-structs = { workspace = true, features = ['serde1'] } [features] diff --git a/pallets/evm-migration/Cargo.toml b/pallets/evm-migration/Cargo.toml index a14f970842..bbd31d8b7c 100644 --- a/pallets/evm-migration/Cargo.toml +++ b/pallets/evm-migration/Cargo.toml @@ -5,21 +5,27 @@ name = "pallet-evm-migration" version = "0.1.1" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - ethereum = { workspace = true } frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-evm = { workspace = true } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } sp-io = { workspace = true } +sp-std = { workspace = true } [features] default = ["runtime-benchmarks", "std"] runtime-benchmarks = ["frame-benchmarking"] -std = ["frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-evm/std", "sp-core/std", "sp-std/std", "sp-io/std"] +std = [ + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "pallet-evm/std", + "sp-core/std", + "sp-io/std", + "sp-std/std", +] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/evm-transaction-payment/Cargo.toml b/pallets/evm-transaction-payment/Cargo.toml index 7a46caf796..342138c417 100644 --- a/pallets/evm-transaction-payment/Cargo.toml +++ b/pallets/evm-transaction-payment/Cargo.toml @@ -5,9 +5,7 @@ name = "pallet-evm-transaction-payment" version = "0.1.1" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } fp-evm = { workspace = true } diff --git a/pallets/foreign-assets/Cargo.toml b/pallets/foreign-assets/Cargo.toml index 5947b6002f..a1074908d1 100644 --- a/pallets/foreign-assets/Cargo.toml +++ b/pallets/foreign-assets/Cargo.toml @@ -5,9 +5,7 @@ name = "pallet-foreign-assets" version = "0.1.0" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } frame-benchmarking = { workspace = true, optional = true } @@ -18,18 +16,17 @@ orml-tokens = { workspace = true } pallet-balances = { features = ["insecure_zero_ed"], workspace = true } pallet-common = { workspace = true } pallet-fungible = { workspace = true } -serde = { workspace = true, optional = true } +serde = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +staging-xcm = { workspace = true } +staging-xcm-executor = { workspace = true } up-data-structs = { workspace = true } -xcm = { workspace = true } -xcm-executor = { workspace = true } [features] default = ["std"] runtime-benchmarks = ['frame-benchmarking', 'pallet-common/runtime-benchmarks'] std = [ - "codec/std", "frame-support/std", "frame-system/std", "log/std", @@ -37,11 +34,11 @@ std = [ "pallet-balances/std", "pallet-common/std", "pallet-fungible/std", + "parity-scale-codec/std", "scale-info/std", - "serde", "sp-runtime/std", "sp-std/std", + "staging-xcm-executor/std", "up-data-structs/std", - "xcm-executor/std" ] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/fungible/Cargo.toml b/pallets/fungible/Cargo.toml index d227f7b8e1..54af678c74 100644 --- a/pallets/fungible/Cargo.toml +++ b/pallets/fungible/Cargo.toml @@ -5,9 +5,7 @@ name = "pallet-fungible" version = "0.1.11" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } evm-coder = { workspace = true } diff --git a/pallets/gov-origins/Cargo.toml b/pallets/gov-origins/Cargo.toml index 48b669582b..80dffba7ee 100644 --- a/pallets/gov-origins/Cargo.toml +++ b/pallets/gov-origins/Cargo.toml @@ -23,9 +23,7 @@ try-runtime = ["frame-support/try-runtime"] ################################################################################ # Substrate Dependencies -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. parity-scale-codec = { workspace = true } - scale-info = { workspace = true } frame-support = { workspace = true } diff --git a/pallets/identity/Cargo.toml b/pallets/identity/Cargo.toml index 225f890f4e..e017a4def4 100644 --- a/pallets/identity/Cargo.toml +++ b/pallets/identity/Cargo.toml @@ -13,10 +13,9 @@ version = "4.0.0-dev" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec", features = ["max-encoded-len"] } +parity-scale-codec = { workspace = true, features = ["max-encoded-len"] } -enumflags2 = "0.7.4" +enumflags2 = "0.7.8" frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -38,14 +37,14 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] std = [ - "codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "pallet-balances/std", + "parity-scale-codec/std", "scale-info/std", "sp-io/std", "sp-runtime/std", "sp-std/std", - "pallet-balances/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/inflation/Cargo.toml b/pallets/inflation/Cargo.toml index 771175aafe..95716c6eba 100644 --- a/pallets/inflation/Cargo.toml +++ b/pallets/inflation/Cargo.toml @@ -18,20 +18,18 @@ targets = ['x86_64-unknown-linux-gnu'] default = ['std'] runtime-benchmarks = ['frame-benchmarking'] std = [ - 'codec/std', 'frame-benchmarking/std', 'frame-support/std', 'frame-system/std', 'pallet-balances/std', + 'parity-scale-codec/std', 'sp-runtime/std', 'sp-std/std', ] try-runtime = ["frame-support/try-runtime"] [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } frame-benchmarking = { workspace = true, optional = true } diff --git a/pallets/maintenance/Cargo.toml b/pallets/maintenance/Cargo.toml index 5ed734c480..075215b3b3 100644 --- a/pallets/maintenance/Cargo.toml +++ b/pallets/maintenance/Cargo.toml @@ -10,19 +10,27 @@ repository = "https://github.com/UniqueNetwork/unique-chain" version = "0.1.0" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } +sp-runtime = { workspace = true } sp-std = { workspace = true } [features] default = ["std"] runtime-benchmarks = ["frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks"] -std = ["codec/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "scale-info/std", "sp-core/std", "sp-std/std"] +std = [ + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "parity-scale-codec/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] try-runtime = ["frame-support/try-runtime"] diff --git a/pallets/nonfungible/Cargo.toml b/pallets/nonfungible/Cargo.toml index d1311cd899..098b59da03 100644 --- a/pallets/nonfungible/Cargo.toml +++ b/pallets/nonfungible/Cargo.toml @@ -5,9 +5,6 @@ name = "pallet-nonfungible" version = "0.1.14" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - evm-coder = { workspace = true } frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } @@ -16,6 +13,7 @@ pallet-common = { workspace = true } pallet-evm = { workspace = true } pallet-evm-coder-substrate = { workspace = true } pallet-structure = { workspace = true } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/pallets/refungible/Cargo.toml b/pallets/refungible/Cargo.toml index 980c5384c3..e1a7e4c68d 100644 --- a/pallets/refungible/Cargo.toml +++ b/pallets/refungible/Cargo.toml @@ -5,9 +5,7 @@ name = "pallet-refungible" version = "0.2.13" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } evm-coder = { workspace = true } diff --git a/pallets/structure/Cargo.toml b/pallets/structure/Cargo.toml index 6a73858492..ff28a0a6ca 100644 --- a/pallets/structure/Cargo.toml +++ b/pallets/structure/Cargo.toml @@ -4,9 +4,7 @@ name = "pallet-structure" version = "0.1.2" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - +parity-scale-codec = { workspace = true } scale-info = { workspace = true } frame-benchmarking = { workspace = true, optional = true } @@ -15,6 +13,7 @@ frame-system = { workspace = true } log = { workspace = true } pallet-common = { workspace = true } pallet-evm = { workspace = true } +sp-runtime = { workspace = true } sp-std = { workspace = true } up-data-structs = { workspace = true } @@ -27,8 +26,9 @@ std = [ "frame-system/std", "pallet-common/std", "pallet-evm/std", - "codec/std", + "parity-scale-codec/std", "scale-info/std", + "sp-runtime/std", "sp-std/std", "up-data-structs/std", ] diff --git a/pallets/unique/Cargo.toml b/pallets/unique/Cargo.toml index 3807d5da5a..9af8e3653a 100644 --- a/pallets/unique/Cargo.toml +++ b/pallets/unique/Cargo.toml @@ -19,7 +19,6 @@ default = ['std'] limit-testing = ["up-data-structs/limit-testing"] runtime-benchmarks = ['frame-benchmarking', 'pallet-common/runtime-benchmarks'] std = [ - 'codec/std', 'evm-coder/std', 'frame-benchmarking/std', 'frame-support/std', @@ -29,6 +28,7 @@ std = [ 'pallet-evm-coder-substrate/std', 'pallet-evm/std', 'pallet-nonfungible/std', + 'parity-scale-codec/std', 'sp-runtime/std', 'sp-std/std', 'up-data-structs/std', @@ -39,8 +39,7 @@ try-runtime = ["frame-support/try-runtime"] ################################################################################ # Local Dependencies [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } ethereum = { workspace = true } evm-coder = { workspace = true } diff --git a/primitives/app_promotion_rpc/Cargo.toml b/primitives/app_promotion_rpc/Cargo.toml index fd93b16e68..d601552d82 100644 --- a/primitives/app_promotion_rpc/Cargo.toml +++ b/primitives/app_promotion_rpc/Cargo.toml @@ -5,12 +5,12 @@ name = "app-promotion-rpc" version = "0.1.0" [dependencies] -codec = { workspace = true } pallet-evm = { workspace = true } +parity-scale-codec = { workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } [features] default = ["std"] -std = ["codec/std", "sp-api/std", "sp-runtime/std", "sp-std/std"] +std = ["parity-scale-codec/std", "sp-api/std", "sp-runtime/std", "sp-std/std"] diff --git a/primitives/common/Cargo.toml b/primitives/common/Cargo.toml index 0deefea0e8..4b12b4a027 100644 --- a/primitives/common/Cargo.toml +++ b/primitives/common/Cargo.toml @@ -13,6 +13,7 @@ default = ['std'] std = [ 'cumulus-primitives-core/std', 'fp-rpc/std', + 'fp-self-contained/std', 'frame-support/std', 'pallet-evm/std', 'sp-consensus-aura/std', @@ -24,6 +25,7 @@ std = [ [dependencies] cumulus-primitives-core = { workspace = true } fp-rpc = { workspace = true } +fp-self-contained = { workspace = true } frame-support = { workspace = true } pallet-evm = { workspace = true } sp-consensus-aura = { workspace = true } diff --git a/primitives/data-structs/Cargo.toml b/primitives/data-structs/Cargo.toml index f70992b1f2..21c9ac7396 100644 --- a/primitives/data-structs/Cargo.toml +++ b/primitives/data-structs/Cargo.toml @@ -9,20 +9,18 @@ repository = 'https://github.com/UniqueNetwork/unique-chain' version = "0.2.2" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - +bondrewd = { version = "0.1.14", features = ["derive"], default-features = false } derivative = { workspace = true } +evm-coder = { workspace = true } frame-support = { workspace = true } pallet-evm = { workspace = true } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } -serde = { workspace = true, optional = true } +serde = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -bondrewd = { version = "0.1.14", features = ["derive"], default-features = false } struct-versioning = { workspace = true } -evm-coder = { workspace = true } [features] default = ["std"] @@ -30,9 +28,9 @@ limit-testing = [] runtime-benchmarks = [] serde1 = ["serde/alloc"] std = [ - "codec/std", "frame-support/std", "pallet-evm/std", + "parity-scale-codec/std", "serde/std", "serde1", "sp-core/std", diff --git a/primitives/pov-estimate-rpc/Cargo.toml b/primitives/pov-estimate-rpc/Cargo.toml index ef872d1d9d..938ea9c343 100644 --- a/primitives/pov-estimate-rpc/Cargo.toml +++ b/primitives/pov-estimate-rpc/Cargo.toml @@ -5,7 +5,7 @@ name = "up-pov-estimate-rpc" version = "0.1.0" [dependencies] -codec = { workspace = true } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } serde = { workspace = true, optional = true } sp-api = { workspace = true } @@ -15,4 +15,12 @@ sp-std = { workspace = true } [features] default = ["std"] -std = ["codec/std", "scale-info/std", "serde/std", "sp-api/std", "sp-core/std", "sp-runtime/std", "sp-std/std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + "serde/std", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index e8b0c94ea5..94f552164b 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -5,9 +5,9 @@ name = "up-rpc" version = "0.1.3" [dependencies] -codec = { workspace = true } pallet-common = { workspace = true } pallet-evm = { workspace = true } +parity-scale-codec = { workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } @@ -17,8 +17,8 @@ up-data-structs = { workspace = true } [features] default = ["std"] std = [ - "codec/std", "pallet-common/std", + "parity-scale-codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std", diff --git a/runtime/common/ethereum/precompiles/utils/macro/Cargo.toml b/runtime/common/ethereum/precompiles/utils/macro/Cargo.toml index c7cb90055e..61f96eb946 100644 --- a/runtime/common/ethereum/precompiles/utils/macro/Cargo.toml +++ b/runtime/common/ethereum/precompiles/utils/macro/Cargo.toml @@ -9,7 +9,7 @@ version = "0.1.0" proc-macro = true [dependencies] -num_enum = { version = "0.5.3", default-features = false } +num_enum = { version = "0.7.0", default-features = false } proc-macro2 = "1.0" quote = "1.0" sha3 = "0.8" diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 047c081765..b03309be17 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -38,8 +38,10 @@ runtime-benchmarks = [ 'pallet-app-promotion/runtime-benchmarks', 'pallet-balances/runtime-benchmarks', 'pallet-collator-selection/runtime-benchmarks', + 'pallet-collective/runtime-benchmarks', 'pallet-common/runtime-benchmarks', 'pallet-configuration/runtime-benchmarks', + 'pallet-democracy/runtime-benchmarks', 'pallet-ethereum/runtime-benchmarks', 'pallet-evm-coder-substrate/runtime-benchmarks', 
'pallet-evm-migration/runtime-benchmarks', @@ -48,25 +50,21 @@ runtime-benchmarks = [ 'pallet-identity/runtime-benchmarks', 'pallet-inflation/runtime-benchmarks', 'pallet-maintenance/runtime-benchmarks', + 'pallet-membership/runtime-benchmarks', 'pallet-nonfungible/runtime-benchmarks', - 'pallet-democracy/runtime-benchmarks', - 'pallet-collective/runtime-benchmarks', 'pallet-ranked-collective/runtime-benchmarks', - 'pallet-membership/runtime-benchmarks', 'pallet-referenda/runtime-benchmarks', - 'pallet-scheduler/runtime-benchmarks', 'pallet-refungible/runtime-benchmarks', + 'pallet-scheduler/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', - 'pallet-utility/runtime-benchmarks', - 'pallet-unique-scheduler-v2/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'sp-runtime/runtime-benchmarks', - 'xcm-builder/runtime-benchmarks', + 'staging-xcm-builder/runtime-benchmarks', ] std = [ - 'codec/std', 'cumulus-pallet-aura-ext/std', 'cumulus-pallet-parachain-system/std', 'cumulus-pallet-xcm/std', @@ -80,13 +78,14 @@ std = [ 'frame-try-runtime/std', 'pallet-aura/std', 'pallet-balances/std', - 'pallet-democracy/std', 'pallet-collective/std', - 'pallet-ranked-collective/std', + 'pallet-democracy/std', + 'pallet-gov-origins/std', 'pallet-membership/std', + 'pallet-ranked-collective/std', 'pallet-referenda/std', - 'pallet-gov-origins/std', 'pallet-scheduler/std', + 'parity-scale-codec/std', # 'pallet-contracts/std', # 'pallet-contracts-primitives/std', # 'pallet-contracts-rpc-runtime-api/std', @@ -121,12 +120,11 @@ std = [ 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', - 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', - 'pallet-unique-scheduler-v2/std', 'pallet-unique/std', + 'pallet-utility/std', 'parachain-info/std', 'serde', 'sp-api/std', @@ -140,14 +138,14 @@ std = [ 'sp-std/std', 'sp-transaction-pool/std', 'sp-version/std', + 'staging-xcm-builder/std', + 'staging-xcm-executor/std', + 'staging-xcm/std', 'up-common/std', 'up-data-structs/std', 'up-pov-estimate-rpc/std', 'up-rpc/std', 'up-sponsorship/std', - 'xcm-builder/std', - 'xcm-executor/std', - 'xcm/std', "orml-tokens/std", "orml-traits/std", @@ -187,9 +185,11 @@ try-runtime = [ 'pallet-base-fee/try-runtime', 'pallet-charge-transaction/try-runtime', 'pallet-collective/try-runtime', + 'pallet-collective/try-runtime', 'pallet-common/try-runtime', 'pallet-configuration/try-runtime', 'pallet-democracy/try-runtime', + 'pallet-democracy/try-runtime', 'pallet-ethereum/try-runtime', 'pallet-evm-coder-substrate/try-runtime', 'pallet-evm-contract-helpers/try-runtime', @@ -198,28 +198,25 @@ try-runtime = [ 'pallet-evm/try-runtime', 'pallet-foreign-assets/try-runtime', 'pallet-fungible/try-runtime', + 'pallet-gov-origins/try-runtime', 'pallet-inflation/try-runtime', 'pallet-maintenance/try-runtime', 'pallet-membership/try-runtime', + 'pallet-membership/try-runtime', 'pallet-nonfungible/try-runtime', - 'pallet-democracy/try-runtime', - 'pallet-collective/try-runtime', 'pallet-ranked-collective/try-runtime', - 'pallet-membership/try-runtime', 'pallet-referenda/try-runtime', - 'pallet-gov-origins/try-runtime', - 'pallet-scheduler/try-runtime', 'pallet-refungible/try-runtime', 'pallet-scheduler/try-runtime', + 'pallet-scheduler/try-runtime', 'pallet-structure/try-runtime', 'pallet-sudo/try-runtime', 'pallet-test-utils?/try-runtime', 
'pallet-timestamp/try-runtime', - 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', - 'pallet-unique-scheduler-v2/try-runtime', 'pallet-unique/try-runtime', + 'pallet-utility/try-runtime', 'pallet-xcm/try-runtime', 'parachain-info/try-runtime', ] @@ -227,20 +224,17 @@ try-runtime = [ app-promotion = [] collator-selection = [] foreign-assets = [] -governance = [] gov-test-timings = [] +governance = [] preimage = [] refungible = [] -unique-scheduler = [] session-test-timings = [] +unique-scheduler = [] ################################################################################ # local dependencies [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-dmp-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } @@ -266,13 +260,14 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } -pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } pallet-xcm = { workspace = true } parachain-info = { workspace = true } -polkadot-parachain = { workspace = true } +parity-scale-codec = { workspace = true } +polkadot-parachain-primitives = { workspace = true } smallvec = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } @@ -287,9 +282,9 @@ sp-session = { workspace = true } sp-std = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } -xcm = { workspace = true } -xcm-builder = { workspace = true } -xcm-executor = { workspace = true } +staging-xcm = { workspace = true } +staging-xcm-builder = { workspace = true } +staging-xcm-executor = { workspace = true } app-promotion-rpc = { workspace = true } derivative = { workspace = true } @@ -304,8 +299,10 @@ pallet-balances-adapter = { workspace = true } pallet-base-fee = { workspace = true } pallet-charge-transaction = { workspace = true } pallet-collator-selection = { workspace = true } +pallet-collective = { workspace = true } pallet-common = { workspace = true } pallet-configuration = { workspace = true } +pallet-democracy = { workspace = true } pallet-ethereum = { workspace = true } pallet-evm = { workspace = true } pallet-evm-coder-substrate = { workspace = true } @@ -315,18 +312,16 @@ pallet-evm-precompile-simple = { workspace = true } pallet-evm-transaction-payment = { workspace = true } pallet-foreign-assets = { workspace = true } pallet-fungible = { workspace = true } +pallet-gov-origins = { workspace = true } pallet-identity = { workspace = true } pallet-inflation = { workspace = true } pallet-maintenance = { workspace = true } +pallet-membership = { workspace = true } pallet-nonfungible = { workspace = true } -pallet-democracy = { workspace = true } -pallet-collective = { workspace = true } pallet-ranked-collective = { workspace = true } -pallet-membership = { workspace = true } pallet-referenda = { workspace = true } -pallet-gov-origins = { workspace = true } -pallet-scheduler = { workspace = true } pallet-refungible = { workspace = true } +pallet-scheduler = { workspace = true } pallet-structure = { workspace = true } 
pallet-unique = { workspace = true } precompile-utils-macro = { workspace = true } diff --git a/runtime/quartz/Cargo.toml b/runtime/quartz/Cargo.toml index 8a5ad8012d..c6733a9688 100644 --- a/runtime/quartz/Cargo.toml +++ b/runtime/quartz/Cargo.toml @@ -32,9 +32,11 @@ runtime-benchmarks = [ 'pallet-balances/runtime-benchmarks', 'pallet-collator-selection/runtime-benchmarks', 'pallet-collective/runtime-benchmarks', + 'pallet-collective/runtime-benchmarks', 'pallet-common/runtime-benchmarks', 'pallet-configuration/runtime-benchmarks', 'pallet-democracy/runtime-benchmarks', + 'pallet-democracy/runtime-benchmarks', 'pallet-ethereum/runtime-benchmarks', 'pallet-evm-coder-substrate/runtime-benchmarks', 'pallet-evm-migration/runtime-benchmarks', @@ -44,25 +46,22 @@ runtime-benchmarks = [ 'pallet-inflation/runtime-benchmarks', 'pallet-maintenance/runtime-benchmarks', 'pallet-membership/runtime-benchmarks', + 'pallet-membership/runtime-benchmarks', 'pallet-nonfungible/runtime-benchmarks', - 'pallet-democracy/runtime-benchmarks', - 'pallet-collective/runtime-benchmarks', 'pallet-ranked-collective/runtime-benchmarks', - 'pallet-membership/runtime-benchmarks', 'pallet-referenda/runtime-benchmarks', - 'pallet-scheduler/runtime-benchmarks', 'pallet-refungible/runtime-benchmarks', 'pallet-scheduler/runtime-benchmarks', + 'pallet-scheduler/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', - 'pallet-utility/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'sp-runtime/runtime-benchmarks', - 'xcm-builder/runtime-benchmarks', + 'staging-xcm-builder/runtime-benchmarks', ] std = [ - 'codec/std', 'cumulus-pallet-aura-ext/std', 'cumulus-pallet-parachain-system/std', 'cumulus-pallet-xcm/std', @@ -81,6 +80,7 @@ std = [ 'pallet-democracy/std', 'pallet-membership/std', 'pallet-scheduler/std', + 'parity-scale-codec/std', # 'pallet-contracts/std', # 'pallet-contracts-primitives/std', # 'pallet-contracts-rpc-runtime-api/std', @@ -99,8 +99,10 @@ std = [ 'pallet-base-fee/std', 'pallet-charge-transaction/std', 'pallet-collator-selection/std', + 'pallet-collective/std', 'pallet-common/std', 'pallet-configuration/std', + 'pallet-democracy/std', 'pallet-ethereum/std', 'pallet-evm-coder-substrate/std', 'pallet-evm-contract-helpers/std', @@ -108,24 +110,22 @@ std = [ 'pallet-evm-transaction-payment/std', 'pallet-evm/std', 'pallet-fungible/std', + 'pallet-gov-origins/std', 'pallet-inflation/std', + 'pallet-membership/std', 'pallet-nonfungible/std', - 'pallet-democracy/std', - 'pallet-collective/std', 'pallet-ranked-collective/std', - 'pallet-membership/std', 'pallet-referenda/std', - 'pallet-gov-origins/std', - 'pallet-scheduler/std', 'pallet-refungible/std', + 'pallet-scheduler/std', 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', - 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', 'pallet-unique/std', + 'pallet-utility/std', 'parachain-info/std', 'serde', 'sp-api/std', @@ -139,14 +139,14 @@ std = [ 'sp-std/std', 'sp-transaction-pool/std', 'sp-version/std', + 'staging-xcm-builder/std', + 'staging-xcm-executor/std', + 'staging-xcm/std', 'up-common/std', 'up-data-structs/std', 'up-pov-estimate-rpc/std', 'up-rpc/std', 'up-sponsorship/std', - 'xcm-builder/std', - 'xcm-executor/std', - 'xcm/std', "orml-tokens/std", "orml-traits/std", @@ -181,8 +181,10 @@ try-runtime = [ 'pallet-balances-adapter/try-runtime', 
'pallet-balances/try-runtime', 'pallet-charge-transaction/try-runtime', + 'pallet-collective/try-runtime', 'pallet-common/try-runtime', 'pallet-configuration/try-runtime', + 'pallet-democracy/try-runtime', 'pallet-ethereum/try-runtime', 'pallet-evm-coder-substrate/try-runtime', 'pallet-evm-contract-helpers/try-runtime', @@ -191,24 +193,22 @@ try-runtime = [ 'pallet-evm/try-runtime', 'pallet-foreign-assets/try-runtime', 'pallet-fungible/try-runtime', + 'pallet-gov-origins/try-runtime', 'pallet-inflation/try-runtime', 'pallet-maintenance/try-runtime', + 'pallet-membership/try-runtime', 'pallet-nonfungible/try-runtime', - 'pallet-democracy/try-runtime', - 'pallet-collective/try-runtime', 'pallet-ranked-collective/try-runtime', - 'pallet-membership/try-runtime', 'pallet-referenda/try-runtime', - 'pallet-gov-origins/try-runtime', - 'pallet-scheduler/try-runtime', 'pallet-refungible/try-runtime', + 'pallet-scheduler/try-runtime', 'pallet-structure/try-runtime', 'pallet-sudo/try-runtime', 'pallet-timestamp/try-runtime', - 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', 'pallet-unique/try-runtime', + 'pallet-utility/try-runtime', 'pallet-xcm/try-runtime', 'parachain-info/try-runtime', ] @@ -216,20 +216,17 @@ try-runtime = [ app-promotion = [] collator-selection = [] foreign-assets = [] +gov-test-timings = [] governance = [] preimage = [] refungible = [] -unique-scheduler = [] -gov-test-timings = [] session-test-timings = [] +unique-scheduler = [] ################################################################################ # local dependencies [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. -codec = { workspace = true, package = "parity-scale-codec" } - cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-dmp-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } @@ -255,13 +252,14 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } -pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } pallet-xcm = { workspace = true } parachain-info = { workspace = true } -polkadot-parachain = { workspace = true } +parity-scale-codec = { workspace = true } +polkadot-parachain-primitives = { workspace = true } smallvec = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } @@ -276,9 +274,9 @@ sp-session = { workspace = true } sp-std = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } -xcm = { workspace = true } -xcm-builder = { workspace = true } -xcm-executor = { workspace = true } +staging-xcm = { workspace = true } +staging-xcm-builder = { workspace = true } +staging-xcm-executor = { workspace = true } app-promotion-rpc = { workspace = true } derivative = { workspace = true } @@ -287,20 +285,20 @@ log = { workspace = true } pallet-app-promotion = { workspace = true } pallet-balances-adapter = { workspace = true } pallet-collator-selection = { workspace = true } +pallet-collective = { workspace = true } pallet-common = { workspace = true } pallet-configuration = { workspace = true } +pallet-democracy = { workspace = true } pallet-fungible = { workspace = true } +pallet-gov-origins 
= { workspace = true } pallet-identity = { workspace = true } pallet-inflation = { workspace = true } +pallet-membership = { workspace = true } pallet-nonfungible = { workspace = true } -pallet-democracy = { workspace = true } -pallet-collective = { workspace = true } pallet-ranked-collective = { workspace = true } -pallet-membership = { workspace = true } pallet-referenda = { workspace = true } -pallet-gov-origins = { workspace = true } -pallet-scheduler = { workspace = true } pallet-refungible = { workspace = true } +pallet-scheduler = { workspace = true } pallet-structure = { workspace = true } pallet-unique = { workspace = true } scale-info = { workspace = true } @@ -312,7 +310,7 @@ up-rpc = { workspace = true } evm-coder = { workspace = true } fp-rpc = { workspace = true } fp-self-contained = { workspace = true } -num_enum = { version = "0.5.3", default-features = false } +num_enum = { version = "0.7.0", default-features = false } pallet-base-fee = { workspace = true } pallet-charge-transaction = { workspace = true } pallet-ethereum = { workspace = true } diff --git a/runtime/tests/Cargo.toml b/runtime/tests/Cargo.toml index 98ed01c751..020bfb41fd 100644 --- a/runtime/tests/Cargo.toml +++ b/runtime/tests/Cargo.toml @@ -37,12 +37,12 @@ pallet-unique = { workspace = true } pallet-evm-coder-substrate = { workspace = true } -codec = { workspace = true, package = "parity-scale-codec" } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } evm-coder = { workspace = true } +staging-xcm = { workspace = true } up-sponsorship = { workspace = true } -xcm = { workspace = true } [dev-dependencies] pallet-common = { workspace = true, features = ["tests"] } diff --git a/runtime/unique/Cargo.toml b/runtime/unique/Cargo.toml index f39d9d3fde..6b03dfeeb7 100644 --- a/runtime/unique/Cargo.toml +++ b/runtime/unique/Cargo.toml @@ -29,9 +29,11 @@ runtime-benchmarks = [ 'pallet-balances/runtime-benchmarks', 'pallet-collator-selection/runtime-benchmarks', 'pallet-collective/runtime-benchmarks', + 'pallet-collective/runtime-benchmarks', 'pallet-common/runtime-benchmarks', 'pallet-configuration/runtime-benchmarks', 'pallet-democracy/runtime-benchmarks', + 'pallet-democracy/runtime-benchmarks', 'pallet-ethereum/runtime-benchmarks', 'pallet-evm-coder-substrate/runtime-benchmarks', 'pallet-evm-migration/runtime-benchmarks', @@ -41,26 +43,23 @@ runtime-benchmarks = [ 'pallet-inflation/runtime-benchmarks', 'pallet-maintenance/runtime-benchmarks', 'pallet-membership/runtime-benchmarks', + 'pallet-membership/runtime-benchmarks', 'pallet-nonfungible/runtime-benchmarks', - 'pallet-democracy/runtime-benchmarks', - 'pallet-collective/runtime-benchmarks', 'pallet-ranked-collective/runtime-benchmarks', - 'pallet-membership/runtime-benchmarks', 'pallet-referenda/runtime-benchmarks', - 'pallet-scheduler/runtime-benchmarks', 'pallet-refungible/runtime-benchmarks', 'pallet-scheduler/runtime-benchmarks', + 'pallet-scheduler/runtime-benchmarks', 'pallet-structure/runtime-benchmarks', 'pallet-timestamp/runtime-benchmarks', - 'pallet-utility/runtime-benchmarks', 'pallet-unique/runtime-benchmarks', + 'pallet-utility/runtime-benchmarks', 'pallet-xcm/runtime-benchmarks', 'sp-runtime/runtime-benchmarks', + 'staging-xcm-builder/runtime-benchmarks', 'up-data-structs/runtime-benchmarks', - 'xcm-builder/runtime-benchmarks', ] std = [ - 'codec/std', 'cumulus-pallet-aura-ext/std', 'cumulus-pallet-parachain-system/std', 'cumulus-pallet-xcm/std', @@ -78,6 +77,7 @@ std = [ 'pallet-democracy/std', 'pallet-membership/std', 
'pallet-scheduler/std', + 'parity-scale-codec/std', # 'pallet-contracts/std', # 'pallet-contracts-primitives/std', # 'pallet-contracts-rpc-runtime-api/std', @@ -97,8 +97,10 @@ std = [ 'pallet-base-fee/std', 'pallet-charge-transaction/std', 'pallet-collator-selection/std', + 'pallet-collective/std', 'pallet-common/std', 'pallet-configuration/std', + 'pallet-democracy/std', 'pallet-ethereum/std', 'pallet-evm-coder-substrate/std', 'pallet-evm-contract-helpers/std', @@ -106,24 +108,22 @@ std = [ 'pallet-evm-transaction-payment/std', 'pallet-evm/std', 'pallet-fungible/std', + 'pallet-gov-origins/std', 'pallet-inflation/std', + 'pallet-membership/std', 'pallet-nonfungible/std', - 'pallet-democracy/std', - 'pallet-collective/std', 'pallet-ranked-collective/std', - 'pallet-membership/std', 'pallet-referenda/std', - 'pallet-gov-origins/std', - 'pallet-scheduler/std', 'pallet-refungible/std', + 'pallet-scheduler/std', 'pallet-structure/std', 'pallet-sudo/std', 'pallet-timestamp/std', - 'pallet-utility/std', 'pallet-transaction-payment-rpc-runtime-api/std', 'pallet-transaction-payment/std', 'pallet-treasury/std', 'pallet-unique/std', + 'pallet-utility/std', 'parachain-info/std', 'sp-api/std', 'sp-block-builder/std', @@ -136,14 +136,14 @@ std = [ 'sp-std/std', 'sp-transaction-pool/std', 'sp-version/std', + 'staging-xcm-builder/std', + 'staging-xcm-executor/std', + 'staging-xcm/std', 'up-common/std', 'up-data-structs/std', 'up-pov-estimate-rpc/std', 'up-rpc/std', 'up-sponsorship/std', - 'xcm-builder/std', - 'xcm-executor/std', - 'xcm/std', "orml-tokens/std", "orml-traits/std", @@ -180,9 +180,11 @@ try-runtime = [ 'pallet-balances/try-runtime', 'pallet-charge-transaction/try-runtime', 'pallet-collective/try-runtime', + 'pallet-collective/try-runtime', 'pallet-common/try-runtime', 'pallet-configuration/try-runtime', 'pallet-democracy/try-runtime', + 'pallet-democracy/try-runtime', 'pallet-ethereum/try-runtime', 'pallet-evm-coder-substrate/try-runtime', 'pallet-evm-contract-helpers/try-runtime', @@ -191,26 +193,24 @@ try-runtime = [ 'pallet-evm/try-runtime', 'pallet-foreign-assets/try-runtime', 'pallet-fungible/try-runtime', + 'pallet-gov-origins/try-runtime', 'pallet-inflation/try-runtime', 'pallet-maintenance/try-runtime', 'pallet-membership/try-runtime', + 'pallet-membership/try-runtime', 'pallet-nonfungible/try-runtime', - 'pallet-democracy/try-runtime', - 'pallet-collective/try-runtime', 'pallet-ranked-collective/try-runtime', - 'pallet-membership/try-runtime', 'pallet-referenda/try-runtime', - 'pallet-gov-origins/try-runtime', - 'pallet-scheduler/try-runtime', 'pallet-refungible/try-runtime', 'pallet-scheduler/try-runtime', + 'pallet-scheduler/try-runtime', 'pallet-structure/try-runtime', 'pallet-sudo/try-runtime', 'pallet-timestamp/try-runtime', - 'pallet-utility/try-runtime', 'pallet-transaction-payment/try-runtime', 'pallet-treasury/try-runtime', 'pallet-unique/try-runtime', + 'pallet-utility/try-runtime', 'pallet-xcm/try-runtime', 'parachain-info/try-runtime', ] @@ -219,20 +219,17 @@ unique-runtime = ['app-promotion', 'foreign-assets', 'refungible'] app-promotion = [] collator-selection = [] foreign-assets = [] +gov-test-timings = [] governance = [] preimage = [] refungible = [] -unique-scheduler = [] -gov-test-timings = [] session-test-timings = [] +unique-scheduler = [] ################################################################################ # local dependencies [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-dmp-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } @@ -258,13 +255,14 @@ pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } -pallet-utility = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } pallet-xcm = { workspace = true } parachain-info = { workspace = true } -polkadot-parachain = { workspace = true } +parity-scale-codec = { workspace = true } +polkadot-parachain-primitives = { workspace = true } smallvec = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } @@ -279,9 +277,9 @@ sp-session = { workspace = true } sp-std = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } -xcm = { workspace = true } -xcm-builder = { workspace = true } -xcm-executor = { workspace = true } +staging-xcm = { workspace = true } +staging-xcm-builder = { workspace = true } +staging-xcm-executor = { workspace = true } app-promotion-rpc = { workspace = true } derivative = { workspace = true } @@ -289,20 +287,20 @@ log = { workspace = true } pallet-app-promotion = { workspace = true } pallet-balances-adapter = { workspace = true } pallet-collator-selection = { workspace = true } +pallet-collective = { workspace = true } pallet-common = { workspace = true } pallet-configuration = { workspace = true } +pallet-democracy = { workspace = true } pallet-fungible = { workspace = true } +pallet-gov-origins = { workspace = true } pallet-identity = { workspace = true } pallet-inflation = { workspace = true } +pallet-membership = { workspace = true } pallet-nonfungible = { workspace = true } -pallet-democracy = { workspace = true } -pallet-collective = { workspace = true } pallet-ranked-collective = { workspace = true } -pallet-membership = { workspace = true } pallet-referenda = { workspace = true } -pallet-gov-origins = { workspace = true } -pallet-scheduler = { workspace = true } pallet-refungible = { workspace = true } +pallet-scheduler = { workspace = true } pallet-structure = { workspace = true } pallet-unique = { workspace = true } scale-info = { workspace = true } diff --git a/test-pallets/utils/Cargo.toml b/test-pallets/utils/Cargo.toml index 83db7b4351..00d009c159 100644 --- a/test-pallets/utils/Cargo.toml +++ b/test-pallets/utils/Cargo.toml @@ -6,21 +6,21 @@ publish = false version = "0.1.0" [dependencies] -# Note: `package = "parity-scale-codec"` must be supplied since the `Encode` macro searches for it. 
-codec = { workspace = true, package = "parity-scale-codec" } - frame-support = { workspace = true } frame-system = { workspace = true } +parity-scale-codec = { workspace = true } scale-info = { workspace = true } +sp-runtime = { workspace = true } sp-std = { workspace = true } [features] default = ["std"] std = [ - "codec/std", "frame-support/std", "frame-system/std", + "parity-scale-codec/std", "scale-info/std", + "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime", "pallet-unique-scheduler-v2/try-runtime"] +try-runtime = ["frame-support/try-runtime"] From d63bb381415c63fdb3d3082ae1688ff03fe36191 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:12 +0200 Subject: [PATCH 092/143] build: regenerate lockfile --- Cargo.lock | 3884 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 2337 insertions(+), 1547 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c5dc6c5183..a825878fec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,16 +18,16 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli", + "gimli 0.27.3", ] [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "gimli", + "gimli 0.28.0", ] [[package]] @@ -115,9 +115,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead 0.5.2", "aes 0.8.3", @@ -172,13 +172,19 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "always-assert" version = "0.1.3" @@ -211,30 +217,29 @@ dependencies = [ [[package]] name = "anstream" -version = "0.3.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" 
+checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -250,9 +255,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.2" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -260,9 +265,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c6f84b74db2535ebae81eede2f39b947dcbf01d093ae5f791e5dd414a1bf289" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "app-promotion-rpc" @@ -284,6 +289,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools 0.10.5", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arc-swap" version = "1.6.0" @@ -291,10 +310,189 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] -name = "array-bytes" -version = "4.2.0" +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-scale" +version = "0.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b08346a3e38e2be792ef53ee168623c9244d968ff00cd70fb9932f6fe36393" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "parity-scale-codec", +] + +[[package]] +name = "ark-scale" +version = "0.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bd73bb6ddb72630987d37fa963e99196896c0d0ea81b7c894567e74a2f83af" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "parity-scale-codec", + "scale-info", +] + +[[package]] +name = "ark-secret-scalar" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=3119f51#3119f51b54b69308abfb0671f6176cb125ae1bf1" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "ark-transcript", + "digest 0.10.7", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-transcript" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=3119f51#3119f51b54b69308abfb0671f6176cb125ae1bf1" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.7", + "rand_core 0.6.4", + "sha3 0.10.8", +] [[package]] name = "array-bytes" @@ -308,6 +506,15 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + [[package]] name = "arrayvec" version = "0.5.2" @@ -333,7 +540,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.25", + "time", ] [[package]] @@ -349,7 +556,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.25", + "time", ] [[package]] @@ -404,6 +611,32 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-executor" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c1da3ae8dabd9c00f453a329dfe1fb28da3c0a72e2478cdcd93171740c20499" +dependencies = [ + "async-lock", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +dependencies = [ + "async-lock", + "autocfg", + "blocking", + "futures-lite", +] + [[package]] name = "async-io" version = "1.13.0" @@ -418,7 +651,7 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.23", + "rustix 0.37.24", "slab", "socket2 0.4.9", "waker-fn", @@ -433,17 +666,52 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-net" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0434b1ed18ce1cf5769b8ac540e33f01fa9471058b5e89da9e06f3c882a8c12f" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" +dependencies = [ + "async-io", + "async-lock", + "autocfg", + "blocking", + "cfg-if", + "event-listener", + "futures-lite", + "rustix 0.37.24", + "signal-hook", + "windows-sys 0.48.0", +] + [[package]] name = "async-recursion" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] +[[package]] +name = "async-task" +version = "4.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9441c6b2fe128a7c2bf680a44c34d0df31ce09e5b7e401fcca3faa483dbc921" + [[package]] name = "async-trait" version = "0.1.73" @@ -452,7 +720,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -465,14 +733,20 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] +[[package]] +name = "atomic-take" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3" + [[package]] name = "atomic-waker" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "atty" @@ -505,19 +779,41 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ - "addr2line 0.20.0", + "addr2line 0.21.0", "cc", "cfg-if", "libc", "miniz_oxide", - "object 0.31.1", + "object 0.32.1", "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +source = "git+https://github.com/w3f/ring-vrf?rev=3119f51#3119f51b54b69308abfb0671f6176cb125ae1bf1" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-scale 0.0.11", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.8", + "zeroize", +] + [[package]] name 
= "base-x" version = "0.2.11" @@ -544,9 +840,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -566,7 +862,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "hash-db 0.16.0", "log", @@ -593,15 +889,30 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.12", + "prettyplease 0.2.15", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.28", + "syn 2.0.37", +] + +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes", ] +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + [[package]] name = "bitflags" version = "1.3.2" @@ -635,40 +946,49 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.2.6", + "constant_time_eq 0.3.0", ] [[package]] name = "blake2s_simd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" +checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.2.6", + "constant_time_eq 0.3.0", ] [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq 0.3.0", - "digest 0.10.7", ] [[package]] @@ -726,6 +1046,22 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blocking" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c4ef1f913d78636d78d538eec1f18de81e481f44b1be0a81060090530846e1" +dependencies = [ + "async-channel", + "async-lock", + "async-task", + 
"fastrand 2.0.1", + "futures-io", + "futures-lite", + "piper", + "tracing", +] + [[package]] name = "bondrewd" version = "0.1.14" @@ -773,11 +1109,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", +] + [[package]] name = "bstr" -version = "1.6.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a" dependencies = [ "memchr", "serde", @@ -794,9 +1139,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byte-slice-cast" @@ -812,9 +1157,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "byteorder" @@ -824,9 +1169,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bzip2-sys" @@ -865,7 +1210,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.18", + "semver 1.0.19", "serde", "serde_json", "thiserror", @@ -873,9 +1218,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", "libc", @@ -903,9 +1248,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40ccee03b5175c18cde8f37e7d2a33bcef6f8ec8f7cc0d81090d1bb380949c9" +checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" dependencies = [ "smallvec", ] @@ -935,42 +1280,52 @@ dependencies = [ ] [[package]] -name = "chacha20poly1305" +name = "chacha20" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "aead 0.4.3", - "chacha20", - "cipher 0.3.0", - "poly1305", - "zeroize", -] - + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version 
= "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +dependencies = [ + "aead 0.4.3", + "chacha20 0.8.2", + "cipher 0.3.0", + "poly1305 0.7.2", + "zeroize", +] + [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", - "time 0.1.45", "wasm-bindgen", - "winapi", + "windows-targets 0.48.5", ] [[package]] name = "cid" -version = "0.8.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2" +checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", "multibase", - "multihash 0.16.3", + "multihash", "serde", "unsigned-varint", ] @@ -1025,20 +1380,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.21" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.3.21" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" dependencies = [ "anstream", "anstyle", @@ -1048,27 +1402,27 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" [[package]] name = "coarsetime" -version = "0.1.23" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a90d114103adbc625300f346d4d09dfb4ab1c4a8df6868435dd903392ecf4354" +checksum = "99280f81a35511dda7d44f7c943491b41d3ac6fd0b54aea92498bec8612a2423" dependencies = [ "libc", "once_cell", @@ -1094,20 +1448,40 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "6.2.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e959d788268e3bf9d35ace83e81b124190378e4c91c9067524675e33394b8ba" +checksum = "9ab77dbd8adecaf3f0db40581631b995f312a8a5ae3aa9993188bb8f23d83a5b" dependencies = [ "strum", "strum_macros", "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof?rev=0e948f3#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + 
"ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + +[[package]] +name = "common-path" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" + [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -1131,11 +1505,33 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const-random" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e" +dependencies = [ + "const-random-macro", + "proc-macro-hack", +] + +[[package]] +name = "const-random-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" +dependencies = [ + "getrandom 0.2.10", + "once_cell", + "proc-macro-hack", + "tiny-keccak", +] + [[package]] name = "constant_time_eq" -version = "0.2.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "constant_time_eq" @@ -1223,7 +1619,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity", "cranelift-isle", - "gimli", + "gimli 0.27.3", "hashbrown 0.13.2", "log", "regalloc2", @@ -1293,7 +1689,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", @@ -1324,16 +1720,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - [[package]] name = "crossbeam-deque" version = "0.8.3" @@ -1397,9 +1783,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" dependencies = [ "generic-array 0.14.7", "rand_core 0.6.4", @@ -1459,12 +1845,13 @@ dependencies = [ [[package]] name = "cumulus-client-cli" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "clap", "parity-scale-codec", "sc-chain-spec", "sc-cli", + "sc-client-api", "sc-service", "sp-core", "sp-runtime", @@ -1474,7 +1861,7 @@ dependencies = [ [[package]] name = "cumulus-client-collator" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", @@ -1497,25 +1884,29 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-aura" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", "futures", "parity-scale-codec", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-overseer", "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-consensus-aura", + "sc-consensus-babe", "sc-consensus-slots", "sc-telemetry", + "schnellru", "sp-api", "sp-application-crypto", "sp-block-builder", @@ -1535,7 +1926,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-common" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-client-pov-recovery", @@ -1548,11 +1939,14 @@ dependencies = [ "polkadot-primitives", "sc-client-api", "sc-consensus", + "sc-consensus-babe", "schnellru", "sp-blockchain", "sp-consensus", + "sp-consensus-slots", "sp-core", "sp-runtime", + "sp-timestamp", "sp-trie", "substrate-prometheus-endpoint", "tracing", @@ -1561,7 +1955,7 @@ dependencies = [ [[package]] name = "cumulus-client-consensus-proposer" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "anyhow", "async-trait", @@ -1576,7 +1970,7 @@ dependencies = [ [[package]] name = "cumulus-client-network" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-relay-chain-interface", @@ -1585,7 +1979,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-primitives", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "sc-client-api", "sp-blockchain", @@ -1599,7 +1993,7 @@ dependencies = [ [[package]] name = "cumulus-client-pov-recovery" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1623,7 +2017,7 @@ dependencies = [ [[package]] name = "cumulus-client-service" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-client-cli", "cumulus-client-collator", @@ -1658,11 +2052,13 @@ dependencies = [ [[package]] name = "cumulus-pallet-aura-ext" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "cumulus-pallet-parachain-system", "frame-support", "frame-system", "pallet-aura", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-application-crypto", @@ -1674,7 +2070,7 @@ dependencies = [ [[package]] name = "cumulus-pallet-dmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1685,13 +2081,13 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", + "staging-xcm", ] [[package]] name = "cumulus-pallet-parachain-system" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bytes", "cumulus-pallet-parachain-system-proc-macro", @@ -1703,7 +2099,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "scale-info", "sp-core", "sp-externalities", @@ -1714,24 +2110,25 @@ dependencies = [ "sp-std", "sp-trie", "sp-version", - "xcm", + "staging-xcm", + "trie-db", ] [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "cumulus-pallet-xcm" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1741,15 +2138,16 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", + "staging-xcm", ] [[package]] name = "cumulus-pallet-xcmp-queue" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", + "frame-benchmarking", "frame-support", "frame-system", "log", @@ -1760,31 +2158,45 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", - "xcm-executor", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "cumulus-primitives-aura" +version = "0.1.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-primitives", + "sp-api", + "sp-consensus-aura", + "sp-runtime", + "sp-std", ] [[package]] name = "cumulus-primitives-core" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "polkadot-core-primitives", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "scale-info", "sp-api", "sp-runtime", "sp-std", "sp-trie", - "xcm", + "staging-xcm", ] [[package]] name = "cumulus-primitives-parachain-inherent" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1807,7 +2219,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "futures", @@ -1820,7 +2232,7 @@ dependencies = [ [[package]] name = "cumulus-primitives-utility" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "frame-support", @@ -1830,15 +2242,15 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", - "xcm-builder", - "xcm-executor", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "cumulus-relay-chain-inprocess-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1846,7 +2258,6 @@ dependencies = [ "futures", "futures-timer", "polkadot-cli", - "polkadot-client", "polkadot-service", "sc-cli", "sc-client-api", @@ -1863,7 +2274,7 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-primitives-core", @@ -1881,15 +2292,14 @@ dependencies = [ [[package]] name = "cumulus-relay-chain-minimal-node" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" 
dependencies = [ - "array-bytes 6.1.0", + "array-bytes", "async-trait", "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", - "lru 0.9.0", "polkadot-availability-recovery", "polkadot-collator-protocol", "polkadot-core-primitives", @@ -1901,47 +2311,53 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "sc-authority-discovery", - "sc-client-api", "sc-network", "sc-network-common", "sc-service", "sc-tracing", "sc-utils", + "schnellru", "sp-api", - "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-runtime", - "tokio", "tracing", ] [[package]] name = "cumulus-relay-chain-rpc-interface" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "cumulus-primitives-core", "cumulus-relay-chain-interface", + "either", "futures", "futures-timer", "jsonrpsee", - "lru 0.9.0", "parity-scale-codec", + "pin-project", "polkadot-overseer", + "rand 0.8.5", "sc-client-api", "sc-rpc-api", "sc-service", + "schnellru", "serde", "serde_json", + "smoldot", + "smoldot-light", "sp-api", "sp-authority-discovery", "sp-consensus-babe", "sp-core", + "sp-runtime", "sp-state-machine", "sp-storage", + "thiserror", "tokio", + "tokio-util", "tracing", "url", ] @@ -1949,7 +2365,7 @@ dependencies = [ [[package]] name = "cumulus-test-relay-sproof-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "parity-scale-codec", @@ -1957,6 +2373,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-std", + "sp-trie", ] [[package]] @@ -1987,23 +2404,50 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", "fiat-crypto", - "packed_simd_2", - "platforms 3.0.2", + "platforms", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "curve25519-dalek-ng" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.6.4", + "subtle-ng", + "zeroize", +] + [[package]] name = "cxx" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666a3ec767f4bbaf0dcfcc3b4ea048b90520b254fdf88813e763f4c762636c14" +checksum = "bbe98ba1789d56fb3db3bee5e032774d4f421b685de7ba703643584ba24effbe" dependencies = [ "cc", "cxxbridge-flags", @@ -2013,9 +2457,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = 
"1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162bec16c4cc28b19e26db0197b60ba5480fdb9a4cbf0f4c6c104a937741b78e" +checksum = "c4ce20f6b8433da4841b1dadfb9468709868022d829d5ca1f2ffbda928455ea3" dependencies = [ "cc", "codespan-reporting", @@ -2023,24 +2467,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "cxxbridge-flags" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6e8c238aadc4b9f2c00269d04c87abb23f96dd240803872536eed1a304bb40e" +checksum = "20888d9e1d2298e2ff473cee30efe7d5036e437857ab68bbfea84c74dba91da2" [[package]] name = "cxxbridge-macro" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d9ffb4193dd22180b8d5747b1e095c3d9c9c665ce39b0483a488948f437e06" +checksum = "2fa16a70dd58129e4dfffdff535fb1bce66673f7bbeec4a5a1765a504e1ccd84" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -2155,9 +2599,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" [[package]] name = "derivative" @@ -2310,7 +2754,51 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", +] + +[[package]] +name = "dleq_vrf" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=3119f51#3119f51b54b69308abfb0671f6176cb125ae1bf1" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-scale 0.0.10", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.4", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "docify" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee528c501ddd15d5181997e9518e59024844eac44fd1e40cb20ddb2a8562fa" +dependencies = [ + "docify_macros", +] + +[[package]] +name = "docify_macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca01728ab2679c464242eca99f94e2ce0514b52ac9ad950e2ed03fca991231c" +dependencies = [ + "common-path", + "derive-syn-parse", + "once_cell", + "proc-macro2", + "quote", + "regex", + "syn 2.0.37", + "termcolor", + "toml 0.7.8", + "walkdir", ] [[package]] @@ -2354,9 +2842,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" +checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd" [[package]] name = "ecdsa" @@ -2386,24 +2874,25 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ - "signature 1.6.4", + "pkcs8 0.10.2", + "signature 2.1.0", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.1.1", "ed25519", - "rand 0.7.3", + "rand_core 0.6.4", "serde", - "sha2 0.9.9", + "sha2 0.10.8", "zeroize", ] @@ -2421,6 +2910,21 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" +dependencies = [ + "curve25519-dalek 4.1.1", + "ed25519", + "hashbrown 0.14.1", + "hex", + "rand_core 0.6.4", + "sha2 0.10.8", + "zeroize", +] + [[package]] name = "either" version = "1.9.0" @@ -2456,7 +2960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.2", + "crypto-bigint 0.5.3", "digest 0.10.7", "ff 0.13.0", "generic-array 0.14.7", @@ -2488,33 +2992,33 @@ dependencies = [ [[package]] name = "enumflags2" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c041f5090df68b32bcd905365fd51769c8b9d553fe87fde0b683534f10c01bd2" +checksum = "5998b4f30320c9d93aed72f63af821bfdac50465b75428fce77b48ec482c3939" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" +checksum = "f95e2801cd355d4a1a3e3953ce6ee5ae9603a5c833455343a8bfe3f44d418246" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "enumn" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" +checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -2557,9 +3061,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480" dependencies = [ "errno-dragonfly", "libc", @@ -2754,19 +3258,6 @@ dependencies = [ "quote", ] -[[package]] -name = "expander" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f360349150728553f92e4c997a16af8915f418d3a0f21b440d34c5632f16ed84" -dependencies = [ - "blake2", - "fs-err", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "expander" version = "2.0.0" @@ -2777,7 +3268,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -2803,9 +3294,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fatality" @@ -2832,10 +3323,22 @@ dependencies = [ "thiserror", ] 
+[[package]] +name = "fc-api" +version = "1.0.0-dev" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" +dependencies = [ + "async-trait", + "fp-storage", + "parity-scale-codec", + "sp-core", + "sp-runtime", +] + [[package]] name = "fc-consensus" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "async-trait", "fp-consensus", @@ -2851,9 +3354,10 @@ dependencies = [ [[package]] name = "fc-db" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "async-trait", + "fc-api", "fp-storage", "kvdb-rocksdb", "log", @@ -2871,7 +3375,7 @@ dependencies = [ [[package]] name = "fc-mapping-sync" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "fc-db", "fc-storage", @@ -2892,16 +3396,15 @@ dependencies = [ [[package]] name = "fc-rpc" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", "evm", - "fc-db", + "fc-api", "fc-mapping-sync", "fc-rpc-core", "fc-storage", - "fp-ethereum", "fp-evm", "fp-rpc", "fp-storage", @@ -2910,13 +3413,13 @@ dependencies = [ "jsonrpsee", "libsecp256k1", "log", - "lru 0.8.1", "pallet-evm", "parity-scale-codec", "prometheus", "rand 0.8.5", "rlp", "sc-client-api", + "sc-consensus-aura", "sc-network", "sc-network-common", "sc-network-sync", @@ -2925,24 +3428,29 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "schnellru", "serde", "sp-api", "sp-block-builder", "sp-blockchain", "sp-consensus", + "sp-consensus-aura", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", "sp-state-machine", "sp-storage", + "sp-timestamp", "substrate-prometheus-endpoint", + "thiserror", "tokio", ] [[package]] name = "fc-rpc-core" version = "1.1.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", @@ -2955,7 +3463,7 @@ dependencies = [ [[package]] name = "fc-storage" version = "1.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", @@ -2999,11 +3507,24 @@ dependencies = [ "subtle", ] 
+[[package]] +name = "fflonk" +version = "0.1.0" +source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] name = "file-per-thread-logger" @@ -3090,7 +3611,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", ] @@ -3107,7 +3628,7 @@ dependencies = [ [[package]] name = "fp-account" version = "1.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "hex", "impl-serde", @@ -3126,7 +3647,7 @@ dependencies = [ [[package]] name = "fp-consensus" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "parity-scale-codec", @@ -3138,13 +3659,12 @@ dependencies = [ [[package]] name = "fp-ethereum" version = "1.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", "fp-evm", "frame-support", - "num_enum 0.6.1", "parity-scale-codec", "sp-std", ] @@ -3152,11 +3672,11 @@ dependencies = [ [[package]] name = "fp-evm" version = "3.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "evm", "frame-support", - "impl-trait-for-tuples", + "num_enum", "parity-scale-codec", "scale-info", "serde", @@ -3168,7 +3688,7 @@ dependencies = [ [[package]] name = "fp-rpc" version = "3.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", @@ -3185,7 +3705,7 @@ dependencies = [ [[package]] name = "fp-self-contained" version = "1.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = 
"git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "frame-support", "parity-scale-codec", @@ -3197,7 +3717,7 @@ dependencies = [ [[package]] name = "fp-storage" version = "2.0.0" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "parity-scale-codec", "serde", @@ -3212,7 +3732,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-support-procedural", @@ -3237,10 +3757,10 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "Inflector", - "array-bytes 4.2.0", + "array-bytes", "chrono", "clap", "comfy-table", @@ -3249,7 +3769,7 @@ dependencies = [ "frame-system", "gethostname", "handlebars", - "itertools", + "itertools 0.10.5", "lazy_static", "linked-hash-map", "log", @@ -3271,12 +3791,13 @@ dependencies = [ "sp-database", "sp-externalities", "sp-inherents", + "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-std", "sp-storage", "sp-trie", + "sp-wasm-interface", "thiserror", "thousands", ] @@ -3284,18 +3805,18 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -3312,11 +3833,12 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", "frame-try-runtime", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -3328,9 +3850,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "15.1.0" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" +checksum = 
"87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" dependencies = [ "cfg-if", "parity-scale-codec", @@ -3341,7 +3863,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-recursion", "futures", @@ -3353,6 +3875,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", "spinners", "substrate-rpc-client", "tokio", @@ -3362,79 +3885,87 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "aquamarine", "bitflags 1.3.2", + "docify", "environmental", "frame-metadata", "frame-support-procedural", "impl-trait-for-tuples", "k256", "log", - "once_cell", + "macro_magic", "parity-scale-codec", "paste", "scale-info", "serde", + "serde_json", "smallvec", "sp-api", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", "sp-debug-derive", + "sp-genesis-builder", "sp-inherents", "sp-io", + "sp-metadata-ir", "sp-runtime", "sp-staking", "sp-state-machine", "sp-std", "sp-tracing", "sp-weights", + "static_assertions", "tt-call", ] [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "Inflector", "cfg-expr", "derive-syn-parse", + "expander 2.0.0", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", + "macro_magic", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cfg-if", "frame-support", @@ -3453,7 +3984,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3468,7 +3999,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "sp-api", @@ -3477,7 +4008,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "parity-scale-codec", @@ -3508,7 +4039,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" dependencies = [ - "rustix 0.38.8", + "rustix 0.38.15", "windows-sys 0.48.0", ] @@ -3578,7 +4109,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "waker-fn", ] @@ -3590,7 +4121,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -3600,8 +4131,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls 0.20.9", + "webpki 0.22.2", ] [[package]] @@ -3635,7 +4166,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "pin-utils", "slab", ] @@ -3732,6 +4263,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" + [[package]] name = "glob" version = "0.3.1" @@ -3775,9 +4312,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -3794,9 +4331,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "4.3.7" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" +checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683" dependencies = [ "log", "pest", @@ -3847,9 +4384,14 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +dependencies = [ + "ahash 
0.8.3", + "allocator-api2", + "serde", +] [[package]] name = "heck" @@ -3868,9 +4410,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -3933,6 +4475,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3963,7 +4514,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -4006,7 +4557,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "socket2 0.4.9", "tokio", "tower-service", @@ -4016,18 +4567,19 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", "log", - "rustls 0.20.8", + "rustls 0.21.7", "rustls-native-certs", "tokio", "tokio-rustls", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] @@ -4147,6 +4699,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -4160,19 +4731,25 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", ] +[[package]] +name = "indexmap-nostd" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e04e2fd2b8188ea827b32ef11de88377086d690286ab35747ef7f9bf3ccb590" + [[package]] name = "indicatif" -version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" +checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" dependencies = [ "console", "instant", @@ -4239,7 +4816,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] @@ -4256,7 +4833,7 @@ version = 
"0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.4", "widestring", "windows-sys 0.48.0", "winreg", @@ -4274,20 +4851,38 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.8", + "hermit-abi 0.3.3", + "rustix 0.38.15", "windows-sys 0.48.0", ] [[package]] -name = "itertools" -version = "0.10.5" +name = "is_executable" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa9acdc6d67b75e626ad644734e8bc6df893d9cd2a834129065d3dd6158ea9c8" +dependencies = [ + "winapi", +] + +[[package]] +name = "itertools" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -4314,9 +4909,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" +checksum = "367a292944c07385839818bb71c8d76611138e2dedb0677d035b8da21d29c78b" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -4329,9 +4924,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" +checksum = "c8b3815d9f5d5de348e5f162b316dc9cdf4548305ebb15b4eb9328e66cf27d7a" dependencies = [ "futures-util", "http", @@ -4345,14 +4940,14 @@ dependencies = [ "tokio-rustls", "tokio-util", "tracing", - "webpki-roots", + "webpki-roots 0.25.2", ] [[package]] name = "jsonrpsee-core" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" +checksum = "2b5dde66c53d6dcdc8caea1874a45632ec0fcf5b437789f1e45766a1512ce803" dependencies = [ "anyhow", "arrayvec 0.7.4", @@ -4378,9 +4973,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" +checksum = "7e5f9fabdd5d79344728521bb65e3106b49ec405a78b66fbff073b72b389fa43" dependencies = [ "async-trait", "hyper", @@ -4397,9 +4992,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" +checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ "heck", "proc-macro-crate", @@ -4410,9 +5005,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" +checksum = "cf4d945a6008c9b03db3354fb3c83ee02d2faa9f2e755ec1dfb69c3551b8f4ba" dependencies = [ "futures-channel", "futures-util", @@ -4432,9 +5027,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" +checksum = "245ba8e5aa633dd1c1e4fae72bce06e71f42d34c14a2767c6b4d173b57bee5e5" dependencies = [ "anyhow", "beef", @@ -4446,9 +5041,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" +checksum = "4e1b3975ed5d73f456478681a417128597acd6a2487855fdb7b4a3d4d195bf5e" dependencies = [ "http", "jsonrpsee-client-transport", @@ -4466,7 +5061,7 @@ dependencies = [ "ecdsa 0.16.8", "elliptic-curve 0.13.5", "once_cell", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -4478,109 +5073,10 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "kusama-runtime" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" -dependencies = [ - "bitvec", - "frame-benchmarking", - "frame-election-provider-support", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "kusama-runtime-constants", - "log", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-bounties", - "pallet-child-bounties", - "pallet-collective", - "pallet-conviction-voting", - "pallet-democracy", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - "pallet-elections-phragmen", - "pallet-fast-unstake", - "pallet-grandpa", - "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43)", - "pallet-im-online", - "pallet-indices", - "pallet-membership", - "pallet-message-queue", - "pallet-multisig", - "pallet-nis", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-nomination-pools-runtime-api", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-scheduler", - "pallet-session", - "pallet-session-benchmarking", - "pallet-society", - "pallet-staking", - "pallet-staking-runtime-api", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-treasury", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parity-scale-codec", - "polkadot-primitives", - "polkadot-runtime-common", - "polkadot-runtime-parachains", - "rustc-hex", - "scale-info", - "serde", - "serde_derive", - "smallvec", - "sp-api", - "sp-arithmetic", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-inherents", - "sp-io", - "sp-mmr-primitives", - "sp-npos-elections", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std", - "sp-transaction-pool", - "sp-version", - "static_assertions", - "substrate-wasm-builder", - "xcm", - "xcm-builder", 
- "xcm-executor", -] - [[package]] name = "kusama-runtime-constants" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "polkadot-primitives", @@ -4624,6 +5120,17 @@ dependencies = [ "smallvec", ] +[[package]] +name = "landlock" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520baa32708c4e957d2fc3a186bc5bd8d26637c33137f399ddfc202adb240068" +dependencies = [ + "enumflags2", + "libc", + "thiserror", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -4638,9 +5145,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" @@ -4652,12 +5159,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - [[package]] name = "libm" version = "0.2.7" @@ -4736,7 +5237,7 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash 0.17.0", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4788,18 +5289,18 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" +checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce" dependencies = [ - "bs58", + "bs58 0.4.0", "ed25519-dalek", "log", "multiaddr", - "multihash 0.17.0", + "multihash", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "zeroize", ] @@ -4824,7 +5325,7 @@ dependencies = [ "log", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "smallvec", "thiserror", "uint", @@ -4882,7 +5383,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -4924,7 +5425,7 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.20.8", + "rustls 0.20.9", "thiserror", "tokio", ] @@ -5004,10 +5505,10 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen 0.10.0", - "ring", - "rustls 0.20.8", + "ring 0.16.20", + "rustls 0.20.9", "thiserror", - "webpki 0.22.0", + "webpki 0.22.2", "x509-parser 0.14.0", "yasna", ] @@ -5043,7 +5544,7 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "log", - "multihash 0.17.0", + "multihash", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -5073,7 +5574,7 @@ dependencies = [ "rw-stream-sink", "soketto", "url", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] @@ -5189,9 +5690,9 @@ dependencies = [ [[package]] name = "linregress" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de0b5f52a9f84544d268f5fabb71b38962d6aa3c6600b8bcd27d44ccf9c9c45" +checksum = "4de04dcecc58d366391f9920245b85ffa684558a5ef6e7736e754347c3aea9c2" 
dependencies = [ "nalgebra", ] @@ -5210,9 +5711,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" [[package]] name = "lock_api" @@ -5232,30 +5733,18 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" -dependencies = [ - "hashbrown 0.12.3", -] - -[[package]] -name = "lru" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ "hashbrown 0.13.2", ] [[package]] name = "lru" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" -dependencies = [ - "hashbrown 0.13.2", -] +checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" [[package]] name = "lru-cache" @@ -5295,6 +5784,54 @@ dependencies = [ "libc", ] +[[package]] +name = "macro_magic" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aee866bfee30d2d7e83835a4574aad5b45adba4cc807f2a3bbba974e5d4383c9" +dependencies = [ + "macro_magic_core", + "macro_magic_macros", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "macro_magic_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e766a20fd9c72bab3e1e64ed63f36bd08410e75803813df210d1ce297d7ad00" +dependencies = [ + "const-random", + "derive-syn-parse", + "macro_magic_core_macros", + "proc-macro2", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "macro_magic_core_macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.37", +] + +[[package]] +name = "macro_magic_macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" +dependencies = [ + "macro_magic_core", + "quote", + "syn 2.0.37", +] + [[package]] name = "maplit" version = "1.0.2" @@ -5324,9 +5861,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matrixmultiply" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090126dc04f95dc0d1c1c91f61bdd474b3930ca064c1edc8a849da2c6cbe1e77" +checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2" dependencies = [ "autocfg", "rawpointer", @@ -5334,26 +5871,27 @@ dependencies = [ [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = 
[ + "cfg-if", "digest 0.10.7", ] [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" +checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.37.23", + "rustix 0.38.15", ] [[package]] @@ -5401,12 +5939,6 @@ dependencies = [ "hash-db 0.16.0", ] -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - [[package]] name = "merlin" version = "2.0.1" @@ -5419,6 +5951,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "mick-jaeger" version = "0.1.8" @@ -5459,7 +6003,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "log", @@ -5478,7 +6022,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "anyhow", "jsonrpsee", @@ -5529,7 +6073,7 @@ dependencies = [ "data-encoding", "log", "multibase", - "multihash 0.17.0", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -5550,9 +6094,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.3" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "blake2b_simd", "blake2s_simd", @@ -5560,24 +6104,11 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.8", "unsigned-varint", ] -[[package]] -name = "multihash" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" -dependencies = [ - "core2", - "digest 0.10.7", - "multihash-derive", - "sha2 0.10.7", - "unsigned-varint", -] - [[package]] name = "multihash-derive" version = "0.8.0" @@ -5732,6 +6263,18 @@ dependencies = [ "memoffset 0.6.5", ] +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + +[[package]] +name = "nodrop" +version = "0.1.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5756,9 +6299,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -5821,49 +6364,29 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", ] [[package]] name = "num_enum" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" -dependencies = [ - "num_enum_derive 0.5.11", -] - -[[package]] -name = "num_enum" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" dependencies = [ - "num_enum_derive 0.6.1", + "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.11" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" dependencies = [ + "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -5886,9 +6409,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] @@ -5945,7 +6468,7 @@ dependencies = [ "hex-literal", "impl-trait-for-tuples", "log", - "num_enum 0.5.11", + "num_enum", "orml-tokens", "orml-traits", "orml-vesting", @@ -5993,12 +6516,11 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-treasury", "pallet-unique", - "pallet-unique-scheduler-v2", "pallet-utility", "pallet-xcm", "parachain-info", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "precompile-utils-macro", "scale-info", "serde", @@ -6016,15 +6538,15 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "substrate-wasm-builder", "up-common", "up-data-structs", "up-pov-estimate-rpc", "up-rpc", "up-sponsorship", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] @@ -6069,7 +6591,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2871aadd82a2c216ee68a69837a526dfe788ecbe74c4c5038a6acdbff6653066" dependencies = 
[ "expander 0.0.6", - "itertools", + "itertools 0.10.5", "petgraph", "proc-macro-crate", "proc-macro2", @@ -6089,10 +6611,11 @@ dependencies = [ [[package]] name = "orml-tokens" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "frame-support", "frame-system", + "log", "orml-traits", "parity-scale-codec", "scale-info", @@ -6105,31 +6628,33 @@ dependencies = [ [[package]] name = "orml-traits" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "frame-support", "impl-trait-for-tuples", "num-traits", "orml-utilities", "parity-scale-codec", + "paste", "scale-info", "serde", "sp-core", "sp-io", "sp-runtime", "sp-std", - "xcm", + "staging-xcm", ] [[package]] name = "orml-utilities" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "frame-support", "parity-scale-codec", "scale-info", "serde", + "sp-core", "sp-io", "sp-runtime", "sp-std", @@ -6138,7 +6663,7 @@ dependencies = [ [[package]] name = "orml-vesting" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "frame-support", "frame-system", @@ -6153,25 +6678,26 @@ dependencies = [ [[package]] name = "orml-xcm-support" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "frame-support", "orml-traits", "parity-scale-codec", "sp-runtime", "sp-std", - "xcm", - "xcm-executor", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "orml-xtokens" version = "0.4.1-dev" -source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v0.9.43#28a2e6f0df9540d91db4018c7ecebb8bfc217a2a" +source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459" dependencies = [ "cumulus-primitives-core", "frame-support", "frame-system", + "log", "orml-traits", "orml-xcm-support", "pallet-xcm", @@ -6181,8 +6707,8 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", - "xcm-executor", + "staging-xcm", + "staging-xcm-executor", ] [[package]] @@ -6193,7 +6719,7 @@ checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.7", 
+ "sha2 0.10.8", ] [[package]] @@ -6204,17 +6730,7 @@ checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.7", -] - -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm 0.1.4", + "sha2 0.10.8", ] [[package]] @@ -6242,10 +6758,11 @@ dependencies = [ [[package]] name = "pallet-aura" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", + "log", "pallet-timestamp", "parity-scale-codec", "scale-info", @@ -6258,7 +6775,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", @@ -6274,7 +6791,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", @@ -6288,7 +6805,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6312,8 +6829,10 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "aquamarine", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -6332,7 +6851,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6368,7 +6887,7 @@ dependencies = [ [[package]] name = "pallet-base-fee" version = "1.0.0" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "fp-evm", "frame-support", @@ -6382,10 +6901,11 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", + "log", "pallet-authorship", "pallet-session", "parity-scale-codec", @@ -6401,9 +6921,9 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "binary-merkle-tree", "frame-support", "frame-system", @@ -6419,13 +6939,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", "sp-std", ] [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6443,7 +6964,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6488,7 +7009,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6519,6 +7040,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-std", + "sp-weights", "up-data-structs", "up-pov-estimate-rpc", ] @@ -6532,21 +7054,23 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "log", "parity-scale-codec", "scale-info", "smallvec", "sp-arithmetic", "sp-core", "sp-io", + "sp-runtime", "sp-std", + "staging-xcm", "up-common", - "xcm", ] [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6563,7 +7087,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6581,7 +7105,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6604,7 +7128,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6612,12 +7136,13 @@ dependencies = [ "parity-scale-codec", "sp-npos-elections", "sp-runtime", + "sp-std", ] [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6629,13 +7154,14 @@ dependencies = [ "sp-io", "sp-npos-elections", "sp-runtime", + "sp-staking", "sp-std", ] [[package]] name = "pallet-ethereum" version = "4.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "ethereum", "ethereum-types", @@ -6658,7 +7184,7 @@ dependencies = [ [[package]] name = "pallet-evm" version = "6.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "environmental", "evm", @@ -6667,6 +7193,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hash-db 0.16.0", "hex", "hex-literal", "impl-trait-for-tuples", @@ -6696,6 +7223,7 @@ dependencies = [ "scale-info", "sp-core", "sp-std", + "sp-weights", "spez", "up-data-structs", ] @@ -6738,7 +7266,7 @@ dependencies = [ [[package]] name = "pallet-evm-precompile-simple" version = "2.0.0-dev" -source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v0.9.43#3ae25048cce709349b242e8ad3c54ada2b321564" +source = "git+https://github.com/uniquenetwork/unique-frontier?branch=unique-polkadot-v1.1.0#e99be6383ce0b0bc33768e479505cf4302dc0f8a" dependencies = [ "fp-evm", "ripemd", @@ -6764,8 +7292,9 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -6796,9 +7325,9 @@ dependencies = [ "serde", "sp-runtime", "sp-std", + "staging-xcm", + "staging-xcm-executor", "up-data-structs", - "xcm", - "xcm-executor", ] [[package]] @@ -6834,7 +7363,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6874,7 +7403,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "enumflags2", "frame-benchmarking", @@ -6890,7 +7419,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6910,7 +7439,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6950,13 +7479,14 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", + "sp-runtime", "sp-std", ] [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6973,7 +7503,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -6992,11 +7522,12 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -7009,7 +7540,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7025,7 +7556,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7041,11 +7572,12 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", "log", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core", @@ -7053,12 +7585,13 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-std", + "sp-tracing", ] [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -7078,7 +7611,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -7110,7 +7643,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", @@ -7127,7 +7660,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -7151,7 +7684,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7168,7 +7701,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7183,7 +7716,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7201,7 +7734,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7216,7 +7749,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "assert_matches", "frame-benchmarking", @@ -7255,8 +7788,9 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -7272,7 +7806,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", @@ -7286,6 +7820,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", + "sp-state-machine", "sp-std", "sp-trie", ] @@ -7293,13 +7828,14 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "pallet-session", "pallet-staking", + "parity-scale-codec", "rand 0.8.5", "sp-runtime", "sp-session", @@ -7309,13 +7845,17 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", "rand_chacha 0.2.2", "scale-info", + "sp-arithmetic", + "sp-io", "sp-runtime", "sp-std", ] @@ -7323,7 +7863,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -7346,18 +7886,18 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "log", "sp-arithmetic", @@ -7366,7 +7906,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "sp-api", @@ -7375,7 +7915,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7401,6 +7941,7 @@ dependencies = [ "pallet-evm", "parity-scale-codec", "scale-info", + "sp-runtime", "sp-std", "up-data-structs", ] @@ -7408,7 +7949,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7423,7 +7964,7 @@ dependencies = [ [[package]] name = "pallet-template-transaction-payment" version = "3.0.0" -source = "git+https://github.com/uniquenetwork/pallet-sponsoring?branch=polkadot-v0.9.43#bd6e4a2a97b1415c22f96512b857d9615cbe2f81" +source = "git+https://github.com/uniquenetwork/pallet-sponsoring?branch=polkadot-v1.1.0#2fbec92640f517e4812be7ed61ef7691af6e0bba" dependencies = [ "frame-benchmarking", "frame-support", @@ -7446,16 +7987,16 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", - "pallet-unique-scheduler-v2", "parity-scale-codec", "scale-info", + "sp-runtime", "sp-std", ] [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7467,13 +8008,14 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "sp-storage", "sp-timestamp", ] [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", 
"frame-support", @@ -7492,7 +8034,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "frame-system", @@ -7508,7 +8050,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -7524,7 +8066,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -7536,7 +8078,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7575,28 +8117,10 @@ dependencies = [ "up-data-structs", ] -[[package]] -name = "pallet-unique-scheduler-v2" -version = "0.1.0" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-preimage", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "substrate-test-utils", -] - [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7612,7 +8136,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7627,7 +8151,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7641,8 +8165,8 @@ dependencies = [ [[package]] name = "pallet-xcm" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bounded-collections", "frame-benchmarking", @@ -7656,14 +8180,14 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", - "xcm-executor", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "pallet-xcm-benchmarks" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-benchmarking", "frame-support", @@ -7674,28 +8198,30 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", - "xcm", - "xcm-builder", - "xcm-executor", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "parachain-info" version = "0.1.0" -source = "git+https://github.com/paritytech/cumulus?branch=polkadot-v0.9.43#b8999fce0f61fb757f9e57e326cda48e70137019" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "cumulus-primitives-core", "frame-support", "frame-system", "parity-scale-codec", "scale-info", + "sp-runtime", + "sp-std", ] [[package]] name = "parity-db" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78f19d20a0d2cc52327a88d131fa1c4ea81ea4a04714aedcfeca2dd410049cf8" +checksum = "ab512a34b3c2c5e465731cc7668edf79208bbe520be03484eeb05e63ed221735" dependencies = [ "blake2", "crc32fast", @@ -7713,9 +8239,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -7728,9 +8254,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7752,9 +8278,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" [[package]] name = "parking_lot" @@ -7801,7 +8327,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -7834,6 +8360,15 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -7866,19 +8401,20 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.2" +version 
= "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" +checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" +checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" dependencies = [ "pest", "pest_generator", @@ -7886,36 +8422,36 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" +checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "pest_meta" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" +checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" dependencies = [ "once_cell", "pest", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] name = "petgraph" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 1.9.3", + "indexmap 2.0.2", ] [[package]] @@ -7935,7 +8471,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -7946,9 +8482,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -7956,6 +8492,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -7984,27 +8531,23 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" - -[[package]] -name = "platforms" -version = "3.0.2" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" [[package]] name = 
"polkadot-approval-distribution" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", + "futures-timer", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", + "polkadot-node-subsystem-util", "polkadot-primitives", "rand 0.8.5", "tracing-gum", @@ -8012,10 +8555,12 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "always-assert", "futures", + "futures-timer", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -8026,13 +8571,12 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "derive_more", "fatality", "futures", - "lru 0.9.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -8041,6 +8585,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "rand 0.8.5", + "schnellru", "sp-core", "sp-keystore", "thiserror", @@ -8049,12 +8594,11 @@ dependencies = [ [[package]] name = "polkadot-availability-recovery" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "fatality", "futures", - "lru 0.9.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -8064,21 +8608,20 @@ dependencies = [ "polkadot-primitives", "rand 0.8.5", "sc-network", + "schnellru", "thiserror", "tracing-gum", ] [[package]] name = "polkadot-cli" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "clap", "frame-benchmarking-cli", "futures", "log", - "polkadot-client", - "polkadot-node-core-pvf-worker", "polkadot-node-metrics", "polkadot-performance-test", "polkadot-service", @@ -8097,54 +8640,11 @@ dependencies = [ "try-runtime-cli", ] -[[package]] -name = "polkadot-client" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" -dependencies = [ - "async-trait", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-system", - "frame-system-rpc-runtime-api", - "futures", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "polkadot-core-primitives", - 
"polkadot-node-core-parachains-inherent", - "polkadot-primitives", - "polkadot-runtime", - "polkadot-runtime-common", - "sc-client-api", - "sc-consensus", - "sc-executor", - "sc-service", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-keyring", - "sp-mmr-primitives", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-storage", - "sp-timestamp", - "sp-transaction-pool", -] - [[package]] name = "polkadot-collator-protocol" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "always-assert", "bitvec", "fatality", "futures", @@ -8158,13 +8658,14 @@ dependencies = [ "sp-keystore", "sp-runtime", "thiserror", + "tokio-util", "tracing-gum", ] [[package]] name = "polkadot-core-primitives" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", @@ -8175,15 +8676,14 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "derive_more", "fatality", "futures", "futures-timer", "indexmap 1.9.3", - "lru 0.9.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -8192,6 +8692,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "sc-network", + "schnellru", "sp-application-crypto", "sp-keystore", "thiserror", @@ -8200,8 +8701,8 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -8214,8 +8715,8 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "futures-timer", @@ -8226,6 +8727,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "sc-network", + "sc-network-common", "sp-application-crypto", "sp-core", "sp-keystore", @@ -8234,8 +8736,8 @@ dependencies = [ [[package]] name = "polkadot-network-bridge" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "always-assert", "async-trait", @@ -8257,8 +8759,8 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "parity-scale-codec", @@ -8275,16 +8777,15 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "derive_more", "futures", "futures-timer", "kvdb", - "lru 0.9.0", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -8293,7 +8794,8 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "sc-keystore", - "schnorrkel", + "schnellru", + "schnorrkel 0.9.1", "sp-application-crypto", "sp-consensus", "sp-consensus-slots", @@ -8304,8 +8806,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "futures", @@ -8313,6 +8815,7 @@ dependencies = [ "kvdb", "parity-scale-codec", "polkadot-erasure-coding", + "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -8325,8 +8828,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "fatality", @@ -8344,8 +8847,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "polkadot-node-subsystem", @@ -8359,8 +8862,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -8371,7 +8874,8 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", - "polkadot-parachain", + "polkadot-overseer", + "polkadot-parachain-primitives", "polkadot-primitives", 
"sp-maybe-compressed-blob", "tracing-gum", @@ -8379,8 +8883,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-api" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "polkadot-node-metrics", @@ -8394,8 +8898,8 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "futures-timer", @@ -8411,27 +8915,27 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "fatality", "futures", "kvdb", - "lru 0.9.0", "parity-scale-codec", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", "sc-keystore", + "schnellru", "thiserror", "tracing-gum", ] [[package]] name = "polkadot-node-core-parachains-inherent" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -8445,10 +8949,27 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "bitvec", + "fatality", + "futures", + "parity-scale-codec", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "thiserror", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-provisioner" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "fatality", @@ -8458,15 +8979,14 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", - "rand 0.8.5", "thiserror", "tracing-gum", ] [[package]] name = "polkadot-node-core-pvf" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "always-assert", "futures", @@ -8475,25 +8995,26 @@ dependencies = [ "parity-scale-codec", "pin-project", "polkadot-core-primitives", + "polkadot-node-core-pvf-common", "polkadot-node-metrics", 
"polkadot-node-primitives", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "rand 0.8.5", "slotmap", "sp-core", "sp-maybe-compressed-blob", - "sp-tracing", "sp-wasm-interface", "substrate-build-script-utils", + "tempfile", "tokio", "tracing-gum", ] [[package]] name = "polkadot-node-core-pvf-checker" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "polkadot-node-primitives", @@ -8507,29 +9028,46 @@ dependencies = [ ] [[package]] -name = "polkadot-node-core-pvf-worker" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +name = "polkadot-node-core-pvf-common" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "assert_matches", "cpu-time", "futures", + "landlock", "libc", "parity-scale-codec", - "polkadot-node-core-pvf", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", - "rayon", "sc-executor", "sc-executor-common", "sc-executor-wasmtime", "sp-core", "sp-externalities", "sp-io", + "sp-tracing", + "tokio", + "tracing-gum", +] + +[[package]] +name = "polkadot-node-core-pvf-prepare-worker" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "futures", + "libc", + "parity-scale-codec", + "polkadot-node-core-pvf-common", + "polkadot-parachain-primitives", + "polkadot-primitives", + "rayon", + "sc-executor", + "sc-executor-common", + "sc-executor-wasmtime", + "sp-io", "sp-maybe-compressed-blob", "sp-tracing", - "substrate-build-script-utils", - "tempfile", "tikv-jemalloc-ctl", "tokio", "tracing-gum", @@ -8537,23 +9075,23 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", - "lru 0.9.0", "polkadot-node-metrics", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-primitives", + "schnellru", "sp-consensus-babe", "tracing-gum", ] [[package]] name = "polkadot-node-jaeger" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "lazy_static", "log", @@ -8570,10 +9108,10 @@ dependencies = [ [[package]] name = "polkadot-node-metrics" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "bs58", + "bs58 0.5.0", "futures", "futures-timer", "log", @@ -8589,11 +9127,12 @@ dependencies = [ [[package]] name = 
"polkadot-node-network-protocol" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-channel", "async-trait", + "bitvec", "derive_more", "fatality", "futures", @@ -8612,15 +9151,15 @@ dependencies = [ [[package]] name = "polkadot-node-primitives" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bounded-vec", "futures", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", - "schnorrkel", + "schnorrkel 0.9.1", "serde", "sp-application-crypto", "sp-consensus-babe", @@ -8629,13 +9168,13 @@ dependencies = [ "sp-maybe-compressed-blob", "sp-runtime", "thiserror", - "zstd 0.11.2+zstd.1.5.2", + "zstd 0.12.4", ] [[package]] name = "polkadot-node-subsystem" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "polkadot-node-jaeger", "polkadot-node-subsystem-types", @@ -8644,8 +9183,8 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "derive_more", @@ -8657,6 +9196,7 @@ dependencies = [ "polkadot-primitives", "polkadot-statement-table", "sc-network", + "sc-transaction-pool-api", "smallvec", "sp-api", "sp-authority-discovery", @@ -8667,17 +9207,16 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "derive_more", "fatality", "futures", "futures-channel", - "itertools", + "itertools 0.10.5", "kvdb", - "lru 0.9.0", "parity-db", "parity-scale-codec", "parking_lot 0.11.2", @@ -8691,6 +9230,7 @@ dependencies = [ "polkadot-primitives", "prioritized-metered-channel", "rand 0.8.5", + "schnellru", "sp-application-crypto", "sp-core", "sp-keystore", @@ -8700,13 +9240,12 @@ dependencies = [ [[package]] name = "polkadot-overseer" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", "futures-timer", - "lru 0.9.0", "orchestra", "parking_lot 0.12.1", "polkadot-node-metrics", @@ -8715,6 +9254,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-primitives", 
"sc-client-api", + "schnellru", "sp-api", "sp-core", "tikv-jemalloc-ctl", @@ -8722,9 +9262,9 @@ dependencies = [ ] [[package]] -name = "polkadot-parachain" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +name = "polkadot-parachain-primitives" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bounded-collections", "derive_more", @@ -8740,32 +9280,32 @@ dependencies = [ [[package]] name = "polkadot-performance-test" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "env_logger 0.9.3", - "kusama-runtime", "log", "polkadot-erasure-coding", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-node-primitives", "polkadot-primitives", "quote", "sc-executor-common", "sp-maybe-compressed-blob", + "staging-kusama-runtime", "thiserror", ] [[package]] name = "polkadot-primitives" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "hex-literal", "parity-scale-codec", "polkadot-core-primitives", - "polkadot-parachain", + "polkadot-parachain-primitives", "scale-info", "serde", "sp-api", @@ -8784,8 +9324,8 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "jsonrpsee", "mmr-rpc", @@ -8816,8 +9356,8 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "frame-benchmarking", @@ -8845,7 +9385,7 @@ dependencies = [ "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-grandpa", - "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43)", + "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0)", "pallet-im-online", "pallet-indices", "pallet-membership", @@ -8874,6 +9414,7 @@ dependencies = [ "pallet-vesting", "pallet-whitelist", "pallet-xcm", + "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-primitives", "polkadot-runtime-common", @@ -8900,19 +9441,20 @@ dependencies = [ "sp-session", "sp-staking", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "static_assertions", "substrate-wasm-builder", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] name = "polkadot-runtime-common" -version = "0.9.43" -source = 
"git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitvec", "frame-benchmarking", @@ -8951,14 +9493,14 @@ dependencies = [ "sp-session", "sp-staking", "sp-std", + "staging-xcm", "static_assertions", - "xcm", ] [[package]] name = "polkadot-runtime-constants" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "polkadot-primitives", @@ -8971,10 +9513,11 @@ dependencies = [ [[package]] name = "polkadot-runtime-metrics" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "bs58", + "bs58 0.5.0", + "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", "sp-std", @@ -8983,8 +9526,8 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bitflags 1.3.2", "bitvec", @@ -8992,6 +9535,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "impl-trait-for-tuples", "log", "pallet-authority-discovery", "pallet-authorship", @@ -9003,7 +9547,7 @@ dependencies = [ "pallet-timestamp", "pallet-vesting", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-metrics", "rand 0.8.5", @@ -9021,39 +9565,42 @@ dependencies = [ "sp-session", "sp-staking", "sp-std", + "staging-xcm", + "staging-xcm-executor", "static_assertions", - "xcm", - "xcm-executor", ] [[package]] name = "polkadot-service" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", + "frame-benchmarking", "frame-benchmarking-cli", "frame-support", + "frame-system", "frame-system-rpc-runtime-api", "futures", "hex-literal", - "kusama-runtime", + "is_executable", "kvdb", "kvdb-rocksdb", "log", - "lru 0.9.0", "mmr-gadget", "pallet-babe", "pallet-im-online", "pallet-staking", + "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "parity-db", + "parity-scale-codec", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", - "polkadot-client", "polkadot-collator-protocol", + "polkadot-core-primitives", "polkadot-dispute-distribution", "polkadot-gossip-support", "polkadot-network-bridge", @@ -9067,7 +9614,9 @@ dependencies = [ "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", 
"polkadot-node-core-parachains-inherent", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", + "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", @@ -9076,11 +9625,11 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-rpc", "polkadot-runtime", - "polkadot-runtime-constants", + "polkadot-runtime-common", "polkadot-runtime-parachains", "polkadot-statement-distribution", "rococo-runtime", @@ -9106,6 +9655,8 @@ dependencies = [ "sc-sysinfo", "sc-telemetry", "sc-transaction-pool", + "sc-transaction-pool-api", + "schnellru", "serde", "serde_json", "sp-api", @@ -9119,6 +9670,7 @@ dependencies = [ "sp-core", "sp-inherents", "sp-io", + "sp-keyring", "sp-keystore", "sp-mmr-primitives", "sp-offchain", @@ -9128,7 +9680,9 @@ dependencies = [ "sp-storage", "sp-timestamp", "sp-transaction-pool", - "sp-trie", + "sp-version", + "sp-weights", + "staging-kusama-runtime", "substrate-prometheus-endpoint", "thiserror", "tracing-gum", @@ -9137,17 +9691,20 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "arrayvec 0.5.2", + "arrayvec 0.7.4", + "bitvec", "fatality", "futures", + "futures-timer", "indexmap 1.9.3", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-keystore", @@ -9158,8 +9715,8 @@ dependencies = [ [[package]] name = "polkadot-statement-table" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -9178,7 +9735,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "windows-sys 0.48.0", ] @@ -9193,6 +9750,17 @@ dependencies = [ "universal-hash 0.4.1", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash 0.5.1", +] + [[package]] name = "polyval" version = "0.5.3" @@ -9219,9 +9787,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" +checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" [[package]] name = "ppv-lite86" @@ -9233,7 +9801,7 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" name = "precompile-utils-macro" version = "0.1.0" dependencies = [ - "num_enum 0.5.11", + "num_enum", "proc-macro2", "quote", "sha3 0.8.2", @@ -9248,7 +9816,7 @@ checksum = 
"59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -9282,12 +9850,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -9354,22 +9922,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro-warning" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e99670bafb56b9a106419397343bdbc8b8742c3cc449fec6345f86173f47cd4" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -9408,7 +9982,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -9429,7 +10003,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -9450,7 +10024,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -9502,7 +10076,7 @@ dependencies = [ "hex-literal", "impl-trait-for-tuples", "log", - "num_enum 0.5.11", + "num_enum", "orml-tokens", "orml-traits", "orml-vesting", @@ -9554,7 +10128,7 @@ dependencies = [ "pallet-xcm", "parachain-info", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "precompile-utils-macro", "scale-info", "serde", @@ -9572,15 +10146,15 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "substrate-wasm-builder", "up-common", "up-data-structs", "up-pov-estimate-rpc", "up-rpc", "up-sponsorship", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] @@ -9624,27 +10198,27 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31999cfc7927c4e212e60fd50934ab40e8e8bfd2d493d6095d2d306bc0764d9" +checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "slab", "thiserror", "tinyvec", "tracing", - "webpki 0.22.0", + "webpki 0.22.2", ] [[package]] name = "quote" -version = "1.0.32" +version = 
"1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -9743,9 +10317,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -9753,14 +10327,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] @@ -9770,8 +10342,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", - "ring", - "time 0.3.25", + "ring 0.16.20", + "time", "x509-parser 0.13.2", "yasna", ] @@ -9783,8 +10355,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", - "time 0.3.25", + "ring 0.16.20", + "time", "yasna", ] @@ -9819,35 +10391,35 @@ dependencies = [ [[package]] name = "reed-solomon-novelpoly" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd8f48b2066e9f69ab192797d66da804d1935bf22763204ed3675740cb0f221" +checksum = "58130877ca403ab42c864fbac74bb319a0746c07a634a92a5cfc7f54af272582" dependencies = [ "derive_more", "fs-err", - "itertools", - "static_init 0.5.2", + "itertools 0.11.0", + "static_init", "thiserror", ] [[package]] name = "ref-cast" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ef7e18e8841942ddb1cf845054f8008410030a3997875d9e49b7a363063df1" +checksum = "acde58d073e9c79da00f2b5b84eed919c8326832648a5b109b3fce1bb1175280" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfaf0c85b766276c797f3791f5bc6d5bd116b41d53049af2789666b0c0bc9fa" +checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -9864,14 +10436,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.3.9", + "regex-syntax 0.7.5", ] [[package]] @@ -9885,13 +10457,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.7.5", ] [[package]] @@ -9902,9 +10474,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "resolv-conf" @@ -9937,6 +10509,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof?rev=0e948f3#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -9946,7 +10533,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -9995,8 +10582,8 @@ dependencies = [ [[package]] name = "rococo-runtime" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "binary-merkle-tree", "frame-benchmarking", @@ -10020,7 +10607,7 @@ dependencies = [ "pallet-democracy", "pallet-elections-phragmen", "pallet-grandpa", - "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43)", + "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0)", "pallet-im-online", "pallet-indices", "pallet-membership", @@ -10048,7 +10635,7 @@ dependencies = [ "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -10071,19 +10658,20 @@ dependencies = [ "sp-session", "sp-staking", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "static_assertions", "substrate-wasm-builder", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] name = "rococo-runtime-constants" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "polkadot-primitives", @@ -10179,7 +10767,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.18", + "semver 1.0.19", ] [[package]] @@ -10207,9 +10795,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.37.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "4279d76516df406a8bd37e7dff53fd37d1a093f997a3c34a5c21658c126db06d" dependencies = [ "bitflags 1.3.2", 
"errno", @@ -10221,14 +10809,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "d2f9da0cbd88f9f09e7814e388301c8414c51c62aa6ce1e4b5c551d49d96e531" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.8", "windows-sys 0.48.0", ] @@ -10240,21 +10828,33 @@ checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.1", "log", - "ring", + "ring 0.16.20", "sct 0.6.1", "webpki 0.21.4", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +dependencies = [ + "log", + "ring 0.16.20", + "sct 0.7.0", + "webpki 0.22.2", +] + +[[package]] +name = "rustls" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring", + "ring 0.16.20", + "rustls-webpki 0.101.6", "sct 0.7.0", - "webpki 0.22.0", ] [[package]] @@ -10275,7 +10875,27 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" +dependencies = [ + "ring 0.16.20", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +dependencies = [ + "ring 0.16.20", + "untrusted", ] [[package]] @@ -10284,6 +10904,17 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "ruzstd" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc" +dependencies = [ + "byteorder", + "thiserror-core", + "twox-hash", +] + [[package]] name = "rw-stream-sink" version = "0.3.0" @@ -10322,7 +10953,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "log", "sp-core", @@ -10333,7 +10964,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -10341,14 +10972,13 @@ dependencies = [ "ip_network", "libp2p", "log", - "multihash 0.17.0", + "multihash", 
"parity-scale-codec", "prost", "prost-build", "rand 0.8.5", "sc-client-api", "sc-network", - "sc-network-common", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -10362,7 +10992,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "futures-timer", @@ -10385,7 +11015,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -10400,7 +11030,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -10419,20 +11049,20 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "chrono", "clap", "fdlimit", @@ -10448,7 +11078,6 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", - "sc-network-common", "sc-service", "sc-telemetry", "sc-tracing", @@ -10470,7 +11099,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "fnv", "futures", @@ -10486,7 +11115,6 @@ dependencies = [ "sp-core", "sp-database", "sp-externalities", - "sp-keystore", "sp-runtime", "sp-state-machine", "sp-statement-store", @@ -10497,7 +11125,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "hash-db 0.16.0", "kvdb", @@ -10523,7 +11151,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -10548,7 +11176,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -10577,7 +11205,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "fork-tree", @@ -10592,8 +11220,8 @@ dependencies = [ "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", - "sc-keystore", "sc-telemetry", + "sc-transaction-pool-api", "scale-info", "sp-api", "sp-application-crypto", @@ -10613,7 +11241,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "jsonrpsee", @@ -10635,9 +11263,9 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "async-trait", "fnv", @@ -10647,9 +11275,7 @@ dependencies = [ "parking_lot 0.12.1", "sc-client-api", "sc-consensus", - "sc-keystore", "sc-network", - "sc-network-common", "sc-network-gossip", "sc-network-sync", "sc-utils", @@ -10671,7 +11297,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "jsonrpsee", @@ -10690,7 +11316,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "fork-tree", "parity-scale-codec", @@ -10703,10 +11329,10 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ahash 0.8.3", - "array-bytes 4.2.0", + "array-bytes", "async-trait", "dyn-clone", "finality-grandpa", @@ -10725,6 +11351,7 @@ dependencies 
= [ "sc-network-common", "sc-network-gossip", "sc-telemetry", + "sc-transaction-pool-api", "sc-utils", "serde_json", "sp-api", @@ -10743,7 +11370,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "finality-grandpa", "futures", @@ -10763,7 +11390,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "assert_matches", "async-trait", @@ -10798,7 +11425,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -10821,13 +11448,13 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "lru 0.8.1", "parity-scale-codec", "parking_lot 0.12.1", "sc-executor-common", "sc-executor-wasmtime", + "schnellru", "sp-api", "sp-core", "sp-externalities", @@ -10843,7 +11470,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -10855,13 +11482,12 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "anyhow", "cfg-if", "libc", "log", - "once_cell", "rustix 0.36.15", "sc-allocator", "sc-executor-common", @@ -10873,7 +11499,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ansi_term", "futures", @@ -10889,9 +11515,9 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 
4.2.0", + "array-bytes", "parking_lot 0.12.1", "serde_json", "sp-application-crypto", @@ -10903,9 +11529,9 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "async-trait", "asynchronous-codec", @@ -10918,37 +11544,33 @@ dependencies = [ "libp2p", "linked_hash_set", "log", - "lru 0.8.1", "mockall", "parity-scale-codec", "parking_lot 0.12.1", + "partial_sort", "pin-project", "rand 0.8.5", - "sc-block-builder", "sc-client-api", - "sc-consensus", "sc-network-common", - "sc-peerset", "sc-utils", "serde", "serde_json", "smallvec", - "snow", "sp-arithmetic", "sp-blockchain", - "sp-consensus", "sp-core", "sp-runtime", "substrate-prometheus-endpoint", "thiserror", "unsigned-varint", + "wasm-timer", "zeroize", ] [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-channel", "cid", @@ -10959,7 +11581,6 @@ dependencies = [ "prost-build", "sc-client-api", "sc-network", - "sc-network-common", "sp-blockchain", "sp-runtime", "thiserror", @@ -10969,45 +11590,33 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", "async-trait", "bitflags 1.3.2", - "bytes", "futures", - "futures-timer", "libp2p-identity", "parity-scale-codec", "prost-build", "sc-consensus", - "sc-peerset", - "sc-utils", - "serde", - "smallvec", - "sp-blockchain", "sp-consensus", "sp-consensus-grandpa", "sp-runtime", - "substrate-prometheus-endpoint", - "thiserror", - "zeroize", ] [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ahash 0.8.3", "futures", "futures-timer", "libp2p", "log", - "lru 0.8.1", "sc-network", "sc-network-common", - "sc-peerset", + "schnellru", "sp-runtime", "substrate-prometheus-endpoint", "tracing", @@ -11016,9 +11625,9 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "futures", "libp2p-identity", @@ -11028,8 +11637,6 @@ dependencies = [ "prost-build", "sc-client-api", "sc-network", - "sc-network-common", - "sc-peerset", "sp-blockchain", "sp-core", "sp-runtime", @@ -11039,9 +11646,9 @@ dependencies = [ [[package]] name = "sc-network-sync" version = 
"0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "async-channel", "async-trait", "fork-tree", @@ -11049,7 +11656,6 @@ dependencies = [ "futures-timer", "libp2p", "log", - "lru 0.8.1", "mockall", "parity-scale-codec", "prost", @@ -11058,8 +11664,8 @@ dependencies = [ "sc-consensus", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", + "schnellru", "smallvec", "sp-arithmetic", "sp-blockchain", @@ -11074,17 +11680,15 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "futures", "libp2p", "log", "parity-scale-codec", - "pin-project", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", "sp-consensus", "sp-runtime", @@ -11094,9 +11698,9 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "bytes", "fnv", "futures", @@ -11104,6 +11708,7 @@ dependencies = [ "hyper", "hyper-rustls", "libp2p", + "log", "num_cpus", "once_cell", "parity-scale-codec", @@ -11112,36 +11717,22 @@ dependencies = [ "sc-client-api", "sc-network", "sc-network-common", - "sc-peerset", + "sc-transaction-pool-api", "sc-utils", "sp-api", "sp-core", + "sp-externalities", + "sp-keystore", "sp-offchain", "sp-runtime", "threadpool", "tracing", ] -[[package]] -name = "sc-peerset" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" -dependencies = [ - "futures", - "libp2p-identity", - "log", - "parking_lot 0.12.1", - "partial_sort", - "sc-utils", - "serde_json", - "sp-arithmetic", - "wasm-timer", -] - [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -11150,7 +11741,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "jsonrpsee", @@ -11181,7 +11772,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" 
dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11200,7 +11791,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "http", "jsonrpsee", @@ -11215,9 +11806,9 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", "futures", "futures-util", "hex", @@ -11228,6 +11819,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", + "sc-utils", "serde", "sp-api", "sp-blockchain", @@ -11235,13 +11827,14 @@ dependencies = [ "sp-runtime", "sp-version", "thiserror", + "tokio", "tokio-stream", ] [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "directories", @@ -11268,11 +11861,9 @@ dependencies = [ "sc-network-light", "sc-network-sync", "sc-network-transactions", - "sc-offchain", "sc-rpc", "sc-rpc-server", "sc-rpc-spec-v2", - "sc-storage-monitor", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -11295,7 +11886,7 @@ dependencies = [ "sp-transaction-storage-proof", "sp-trie", "sp-version", - "static_init 1.0.3", + "static_init", "substrate-prometheus-endpoint", "tempfile", "thiserror", @@ -11307,7 +11898,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "log", "parity-scale-codec", @@ -11318,14 +11909,12 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "clap", "fs4", - "futures", "log", "sc-client-db", - "sc-utils", "sp-core", "thiserror", "tokio", @@ -11334,7 +11923,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11353,7 +11942,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" 
dependencies = [ "futures", "libc", @@ -11372,7 +11961,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "chrono", "futures", @@ -11391,7 +11980,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ansi_term", "atty", @@ -11399,12 +11988,10 @@ dependencies = [ "lazy_static", "libc", "log", - "once_cell", "parking_lot 0.12.1", "regex", "rustc-hash", "sc-client-api", - "sc-rpc-server", "sc-tracing-proc-macro", "serde", "sp-api", @@ -11422,25 +12009,24 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", "futures-timer", "linked-hash-map", "log", - "num-traits", "parity-scale-codec", "parking_lot 0.12.1", "sc-client-api", @@ -11460,13 +12046,15 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", "log", + "parity-scale-codec", "serde", "sp-blockchain", + "sp-core", "sp-runtime", "thiserror", ] @@ -11474,7 +12062,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-channel", "futures", @@ -11542,7 +12130,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -11550,6 +12138,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "schnorrkel" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "844b7645371e6ecdf61ff246ba1958c29e802881a749ae3fb1993675d210d28d" +dependencies = [ + "arrayref", + "arrayvec 0.7.4", + "curve25519-dalek-ng", + "merlin 3.0.0", + "rand_core 0.6.4", + "sha2 0.9.9", + "subtle-ng", + "zeroize", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -11568,7 +12172,7 @@ version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -11578,7 +12182,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -11683,9 +12287,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" dependencies = [ "serde", ] @@ -11698,29 +12302,29 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -11751,9 +12355,9 @@ dependencies = [ [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -11787,9 +12391,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -11827,18 +12431,28 @@ checksum = "b04774de876479a8f712e787f8271b14712971329a4be66c6dff144db7cfc343" [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "c1b21f559e07218024e7e9f90f96f601825397de0e25420135f7f952453fed0b" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" + +[[package]] +name = "signal-hook" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = 
"8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] [[package]] name = "signal-hook-registry" @@ -11884,15 +12498,15 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -11905,8 +12519,8 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "slot-range-helper" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "enumn", "parity-scale-codec", @@ -11926,9 +12540,116 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" + +[[package]] +name = "smol" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" +dependencies = [ + "async-channel", + "async-executor", + "async-fs", + "async-io", + "async-lock", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "smoldot" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0bb30cf57b7b5f6109ce17c3164445e2d6f270af2cb48f6e4d31c2967c9a9f5" +dependencies = [ + "arrayvec 0.7.4", + "async-lock", + "atomic-take", + "base64 0.21.4", + "bip39", + "blake2-rfc", + "bs58 0.5.0", + "chacha20 0.9.1", + "crossbeam-queue", + "derive_more", + "ed25519-zebra 4.0.3", + "either", + "event-listener", + "fnv", + "futures-lite", + "futures-util", + "hashbrown 0.14.1", + "hex", + "hmac 0.12.1", + "itertools 0.11.0", + "libsecp256k1", + "merlin 3.0.0", + "no-std-net", + "nom", + "num-bigint", + "num-rational", + "num-traits", + "pbkdf2 0.12.2", + "pin-project", + "poly1305 0.8.0", + "rand 0.8.5", + "rand_chacha 0.3.1", + "ruzstd", + "schnorrkel 0.10.2", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "siphasher", + "slab", + "smallvec", + "soketto", + "twox-hash", + "wasmi", + "x25519-dalek 2.0.0", + "zeroize", +] + +[[package]] +name = "smoldot-light" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "256b5bad1d6b49045e95fe87492ce73d5af81545d8b4d8318a872d2007024c33" +dependencies = [ + "async-channel", + "async-lock", + "base64 0.21.4", + "blake2-rfc", + "derive_more", + "either", + "event-listener", + "fnv", + "futures-channel", + "futures-lite", + "futures-util", + "hashbrown 0.14.1", + "hex", + "itertools 0.11.0", + "log", + "lru 0.11.1", + "no-std-net", + "parking_lot 0.12.1", + "pin-project", + "rand 0.8.5", + "rand_chacha 
0.3.1", + "serde", + "serde_json", + "siphasher", + "slab", + "smol", + "smoldot", + "zeroize", +] [[package]] name = "snap" @@ -11938,18 +12659,18 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version", - "sha2 0.10.7", + "sha2 0.10.8", "subtle", ] @@ -11965,9 +12686,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys 0.48.0", @@ -11993,7 +12714,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "hash-db 0.16.0", "log", @@ -12001,6 +12722,7 @@ dependencies = [ "scale-info", "sp-api-proc-macro", "sp-core", + "sp-externalities", "sp-metadata-ir", "sp-runtime", "sp-state-machine", @@ -12013,21 +12735,21 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "Inflector", "blake2", - "expander 1.0.0", + "expander 2.0.0", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sp-application-crypto" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "23.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", @@ -12039,8 +12761,8 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "16.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "integer-sqrt", "num-traits", @@ -12054,7 +12776,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", @@ -12067,9 +12789,8 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -12079,13 +12800,13 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "futures", "log", - "lru 0.8.1", "parity-scale-codec", "parking_lot 0.12.1", + "schnellru", "sp-api", "sp-consensus", "sp-database", @@ -12097,7 +12818,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "futures", @@ -12112,14 +12833,13 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "parity-scale-codec", "scale-info", "sp-api", "sp-application-crypto", - "sp-consensus", "sp-consensus-slots", "sp-inherents", "sp-runtime", @@ -12130,7 +12850,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "parity-scale-codec", @@ -12138,11 +12858,9 @@ dependencies = [ "serde", "sp-api", "sp-application-crypto", - "sp-consensus", "sp-consensus-slots", "sp-core", "sp-inherents", - "sp-keystore", "sp-runtime", "sp-std", "sp-timestamp", @@ -12151,7 +12869,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "lazy_static", "parity-scale-codec", @@ -12170,7 +12888,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "finality-grandpa", "log", @@ -12188,7 +12906,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ 
"parity-scale-codec", "scale-info", @@ -12199,16 +12917,18 @@ dependencies = [ [[package]] name = "sp-core" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "21.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "array-bytes 4.2.0", + "array-bytes", + "arrayvec 0.7.4", + "bandersnatch_vrfs", "bitflags 1.3.2", "blake2", "bounded-collections", - "bs58", + "bs58 0.5.0", "dyn-clonable", - "ed25519-zebra", + "ed25519-zebra 3.1.0", "futures", "hash-db 0.16.0", "hash256-std-hasher", @@ -12216,7 +12936,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -12224,7 +12944,7 @@ dependencies = [ "rand 0.8.5", "regex", "scale-info", - "schnorrkel", + "schnorrkel 0.9.1", "secp256k1", "secrecy", "serde", @@ -12238,38 +12958,37 @@ dependencies = [ "substrate-bip39", "thiserror", "tiny-bip39", + "tracing", "zeroize", ] [[package]] name = "sp-core-hashing" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "9.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "blake2b_simd", "byteorder", "digest 0.10.7", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.8", - "sp-std", "twox-hash", ] [[package]] name = "sp-core-hashing-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "9.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "proc-macro2", "quote", "sp-core-hashing", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -12277,18 +12996,18 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sp-externalities" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "0.19.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "environmental", "parity-scale-codec", @@ -12296,16 +13015,26 @@ dependencies = [ "sp-storage", ] +[[package]] +name = "sp-genesis-builder" +version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "serde_json", + "sp-api", + "sp-runtime", + "sp-std", +] + [[package]] name = 
"sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", - "sp-core", "sp-runtime", "sp-std", "thiserror", @@ -12313,13 +13042,11 @@ dependencies = [ [[package]] name = "sp-io" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "23.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bytes", - "ed25519", "ed25519-dalek", - "futures", "libsecp256k1", "log", "parity-scale-codec", @@ -12339,8 +13066,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "24.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "lazy_static", "sp-core", @@ -12350,13 +13077,11 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "0.27.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "futures", "parity-scale-codec", "parking_lot 0.12.1", - "serde", "sp-core", "sp-externalities", "thiserror", @@ -12365,7 +13090,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "thiserror", "zstd 0.12.4", @@ -12374,7 +13099,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -12385,7 +13110,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -12403,7 +13128,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", @@ -12417,7 +13142,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "sp-api", "sp-core", @@ -12426,8 +13151,8 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "backtrace", "lazy_static", @@ -12437,7 +13162,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "rustc-hash", "serde", @@ -12446,8 +13171,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "24.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "either", "hash256-std-hasher", @@ -12468,8 +13193,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "17.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12486,25 +13211,26 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "11.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", "sp-api", "sp-core", + "sp-keystore", "sp-runtime", "sp-staking", "sp-std", @@ -12513,8 +13239,9 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "impl-trait-for-tuples", "parity-scale-codec", "scale-info", "serde", @@ -12525,8 +13252,8 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "0.28.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "hash-db 0.16.0", "log", @@ -12541,16 +13268,22 @@ dependencies = [ "sp-trie", "thiserror", "tracing", + "trie-db", ] [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ - "log", + "aes-gcm 0.10.3", + "curve25519-dalek 4.1.1", + "ed25519-dalek", + "hkdf", "parity-scale-codec", + "rand 0.8.5", "scale-info", + "sha2 0.10.8", "sp-api", "sp-application-crypto", "sp-core", @@ -12559,17 +13292,18 @@ dependencies = [ "sp-runtime-interface", "sp-std", "thiserror", + "x25519-dalek 2.0.0", ] [[package]] name = "sp-std" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" [[package]] name = "sp-storage" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "13.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12582,11 +13316,9 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", - "futures-timer", - "log", "parity-scale-codec", "sp-inherents", "sp-runtime", @@ -12596,8 +13328,8 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "10.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "sp-std", @@ -12609,7 +13341,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "sp-api", "sp-runtime", @@ -12618,10 +13350,9 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", - "log", "parity-scale-codec", "scale-info", "sp-core", @@ -12633,8 +13364,8 @@ dependencies = [ [[package]] name = "sp-trie" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "22.0.0" 
+source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ahash 0.8.3", "hash-db 0.16.0", @@ -12650,14 +13381,14 @@ dependencies = [ "sp-std", "thiserror", "tracing", - "trie-db 0.27.1", + "trie-db", "trie-root", ] [[package]] name = "sp-version" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "22.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12673,33 +13404,32 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "8.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "sp-wasm-interface" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "14.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "anyhow", "impl-trait-for-tuples", "log", "parity-scale-codec", "sp-std", - "wasmi", "wasmtime", ] [[package]] name = "sp-weights" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +version = "20.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "parity-scale-codec", "scale-info", @@ -12719,7 +13449,7 @@ checksum = "c87e960f4dca2788eeb86bbdde8dd246be8948790b7618d656e68f9b720a86e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -12728,6 +13458,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spinners" version = "4.1.0" @@ -12761,9 +13497,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14782ef66f16396bc977f43c89b36f2c7b58357a2cc0bf58a09627542c13c379" +checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", @@ -12780,24 +13516,177 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "staging-kusama-runtime" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "binary-merkle-tree", + "bitvec", + "frame-benchmarking", + "frame-election-provider-support", + "frame-executive", + "frame-support", + "frame-system", + 
"frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "kusama-runtime-constants", + "log", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-bounties", + "pallet-child-bounties", + "pallet-collective", + "pallet-conviction-voting", + "pallet-democracy", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-grandpa", + "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0)", + "pallet-im-online", + "pallet-indices", + "pallet-membership", + "pallet-message-queue", + "pallet-mmr", + "pallet-multisig", + "pallet-nis", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-scheduler", + "pallet-session", + "pallet-session-benchmarking", + "pallet-society", + "pallet-staking", + "pallet-staking-runtime-api", + "pallet-state-trie-migration", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parity-scale-codec", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "rustc-hex", + "scale-info", + "serde", + "serde_derive", + "smallvec", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-inherents", + "sp-io", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", + "sp-storage", + "sp-transaction-pool", + "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "static_assertions", + "substrate-wasm-builder", +] + +[[package]] +name = "staging-xcm" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "bounded-collections", + "derivative", + "environmental", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-weights", + "xcm-procedural", +] + +[[package]] +name = "staging-xcm-builder" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-transaction-payment", + "parity-scale-codec", + "polkadot-parachain-primitives", + "scale-info", + "sp-arithmetic", + "sp-io", + "sp-runtime", + "sp-std", + "sp-weights", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "staging-xcm-executor" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" +dependencies = [ + "environmental", + "frame-benchmarking", + "frame-support", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + 
"sp-weights", + "staging-xcm", +] + [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "static_init" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11b73400442027c4adedda20a9f9b7945234a5bd8d5f7e86da22bd5d0622369c" -dependencies = [ - "cfg_aliases", - "libc", - "parking_lot 0.11.2", - "static_init_macro 0.5.0", -] - [[package]] name = "static_init" version = "1.0.3" @@ -12809,23 +13698,10 @@ dependencies = [ "libc", "parking_lot 0.11.2", "parking_lot_core 0.8.6", - "static_init_macro 1.0.2", + "static_init_macro", "winapi", ] -[[package]] -name = "static_init_macro" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2261c91034a1edc3fc4d1b80e89d82714faede0515c14a75da10cb941546bbf" -dependencies = [ - "cfg_aliases", - "memchr", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "static_init_macro" version = "1.0.2" @@ -12886,7 +13762,7 @@ dependencies = [ "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -12902,7 +13778,7 @@ checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", - "schnorrkel", + "schnorrkel 0.9.1", "sha2 0.9.9", "zeroize", ] @@ -12910,15 +13786,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" -dependencies = [ - "platforms 2.0.0", -] +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -12937,7 +13810,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "hyper", "log", @@ -12949,7 +13822,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "jsonrpsee", @@ -12962,56 +13835,34 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "jsonrpsee", - "log", "parity-scale-codec", "sc-client-api", "sc-rpc-api", - "scale-info", "serde", "sp-core", 
"sp-runtime", "sp-state-machine", "sp-trie", - "trie-db 0.27.1", -] - -[[package]] -name = "substrate-test-utils" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" -dependencies = [ - "futures", - "substrate-test-utils-derive", - "tokio", -] - -[[package]] -name = "substrate-test-utils-derive" -version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.28", + "trie-db", ] [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "ansi_term", "build-helper", "cargo_metadata", "filetime", + "parity-wasm", "sp-maybe-compressed-blob", "strum", "tempfile", - "toml 0.7.6", + "toml 0.7.8", "walkdir", "wasm-opt", ] @@ -13031,6 +13882,12 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "syn" version = "1.0.109" @@ -13044,9 +13901,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -13100,22 +13957,22 @@ checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.8", + "rustix 0.38.15", "windows-sys 0.48.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -13153,29 +14010,49 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "staging-xcm", "up-data-structs", "up-sponsorship", - "xcm", ] [[package]] name = "thiserror" -version = "1.0.46" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9207952ae1a003f42d3d5e892dac3c6ba42aa6ac0c79a6a91a2b5cb4253e75c" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] +[[package]] +name = "thiserror-core" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497" +dependencies = [ + "thiserror-core-impl", +] + 
+[[package]] +name = "thiserror-core-impl" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "thiserror-impl" -version = "1.0.46" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1728216d3244de4f14f14f8c15c79be1a7c67867d28d69b719690e2a19fb445" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -13239,20 +14116,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.25" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -13263,15 +14129,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -13288,7 +14154,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -13331,9 +14197,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.31.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", @@ -13341,9 +14207,9 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.4", "tokio-macros", "windows-sys 0.48.0", ] @@ -13356,7 +14222,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -13372,13 +14238,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.20.8", + "rustls 0.21.7", "tokio", - "webpki 0.22.0", ] [[package]] @@ -13388,22 +14253,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tokio-util", ] [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tracing", ] @@ -13419,9 +14284,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", @@ -13440,11 +14305,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -13464,9 +14329,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "bitflags 2.4.0", "bytes", @@ -13475,7 +14340,7 @@ dependencies = [ "http", "http-body", "http-range-header", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tower-layer", "tower-service", ] @@ -13500,7 +14365,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tracing-attributes", "tracing-core", ] @@ -13513,7 +14378,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -13538,9 +14403,10 @@ dependencies = [ [[package]] name = "tracing-gum" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "coarsetime", "polkadot-node-jaeger", "polkadot-primitives", "tracing", @@ -13549,14 +14415,14 @@ dependencies = [ [[package]] name = "tracing-gum-proc-macro" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "expander 2.0.0", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -13603,18 +14469,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trie-db" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908" -dependencies = [ - "hash-db 0.15.2", - "hashbrown 0.12.3", - "log", - "smallvec", -] - [[package]] name = "trie-db" version = "0.27.1" @@ -13702,7 +14556,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "async-trait", "clap", @@ -13713,7 +14567,6 @@ dependencies = [ "parity-scale-codec", "sc-cli", "sc-executor", - "sc-service", "serde", "serde_json", "sp-api", @@ -13754,7 +14607,7 @@ dependencies = [ "log", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -13775,9 +14628,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "uc-rpc" @@ -13804,13 +14657,13 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-trie", - "trie-db 0.24.0", + "trie-db", "unique-runtime", "up-common", "up-data-structs", "up-pov-estimate-rpc", "up-rpc", - "zstd 0.11.2+zstd.1.5.2", + "zstd 0.12.4", ] [[package]] @@ -13839,9 +14692,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -13854,9 +14707,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -13871,8 +14724,10 @@ dependencies = [ "app-promotion-rpc", "clap", "cumulus-client-cli", + "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", + "cumulus-client-consensus-proposer", "cumulus-client-network", "cumulus-client-service", "cumulus-primitives-core", @@ -13880,6 +14735,7 @@ dependencies = [ "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", + "fc-api", "fc-consensus", "fc-db", "fc-mapping-sync", @@ -13893,6 +14749,7 @@ dependencies = [ "jsonrpsee", "log", "opal-runtime", + "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "polkadot-cli", @@ -13909,6 +14766,7 @@ dependencies = [ "sc-network", "sc-network-sync", "sc-rpc", + "sc-rpc-api", "sc-service", "sc-sysinfo", "sc-telemetry", @@ -13921,6 +14779,7 @@ dependencies = [ "sp-blockchain", "sp-consensus-aura", "sp-core", + "sp-inherents", "sp-io", "sp-keystore", "sp-offchain", @@ -13934,7 +14793,6 @@ dependencies = [ "tokio", "try-runtime-cli", "uc-rpc", - "unique-rpc", "unique-runtime", "up-common", "up-data-structs", @@ -13942,40 +14800,6 @@ 
dependencies = [ "up-rpc", ] -[[package]] -name = "unique-rpc" -version = "0.1.2" -dependencies = [ - "app-promotion-rpc", - "fc-db", - "fc-mapping-sync", - "fc-rpc", - "fc-rpc-core", - "fp-rpc", - "fp-storage", - "jsonrpsee", - "pallet-ethereum", - "pallet-transaction-payment-rpc", - "sc-client-api", - "sc-network", - "sc-network-sync", - "sc-rpc", - "sc-rpc-api", - "sc-service", - "sc-transaction-pool", - "serde", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-runtime", - "substrate-frame-rpc-system", - "uc-rpc", - "up-common", - "up-data-structs", - "up-pov-estimate-rpc", - "up-rpc", -] - [[package]] name = "unique-runtime" version = "0.9.43" @@ -14004,7 +14828,7 @@ dependencies = [ "hex-literal", "impl-trait-for-tuples", "log", - "num_enum 0.5.11", + "num_enum", "orml-tokens", "orml-traits", "orml-vesting", @@ -14056,7 +14880,7 @@ dependencies = [ "pallet-xcm", "parachain-info", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "precompile-utils-macro", "scale-info", "serde", @@ -14074,15 +14898,15 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "substrate-wasm-builder", "up-common", "up-data-structs", "up-pov-estimate-rpc", "up-rpc", "up-sponsorship", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] @@ -14107,9 +14931,9 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ "asynchronous-codec", "bytes", @@ -14129,6 +14953,7 @@ version = "0.9.43" dependencies = [ "cumulus-primitives-core", "fp-rpc", + "fp-self-contained", "frame-support", "pallet-evm", "sp-consensus-aura", @@ -14185,16 +15010,16 @@ dependencies = [ [[package]] name = "up-sponsorship" version = "0.1.0" -source = "git+https://github.com/uniquenetwork/pallet-sponsoring?branch=polkadot-v0.9.43#bd6e4a2a97b1415c22f96512b857d9615cbe2f81" +source = "git+https://github.com/uniquenetwork/pallet-sponsoring?branch=polkadot-v1.1.0#2fbec92640f517e4812be7ed61ef7691af6e0bba" dependencies = [ "impl-trait-for-tuples", ] [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna 0.4.0", @@ -14251,15 +15076,15 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -14280,12 +15105,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] 
-name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -14313,7 +15132,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -14347,7 +15166,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -14369,9 +15188,9 @@ dependencies = [ [[package]] name = "wasm-opt" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fef6d0d508f08334e0ab0e6877feb4c0ecb3956bcf2cb950699b22fedf3e9c" +checksum = "4d005a95f934878a1fb446a816d51c3601a0120ff929005ba3bab3c749cfd1c7" dependencies = [ "anyhow", "libc", @@ -14385,9 +15204,9 @@ dependencies = [ [[package]] name = "wasm-opt-cxx-sys" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc816bbc1596c8f2e8127e137a760c798023ef3d378f2ae51f0f1840e2dfa445" +checksum = "6d04e240598162810fad3b2e96fa0dec6dba1eb65a03f3bd99a9248ab8b56caa" dependencies = [ "anyhow", "cxx", @@ -14397,9 +15216,9 @@ dependencies = [ [[package]] name = "wasm-opt-sys" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40199e4f68ef1071b3c6d0bd8026a12b481865d4b9e49c156932ea9a6234dd14" +checksum = "2efd2aaca519d64098c4faefc8b7433a97ed511caf4c9e516384eb6aef1ff4f9" dependencies = [ "anyhow", "cc", @@ -14424,35 +15243,33 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.13.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" +checksum = "1f341edb80021141d4ae6468cbeefc50798716a347d4085c3811900049ea8945" dependencies = [ - "parity-wasm", - "wasmi-validation", + "smallvec", + "spin 0.9.8", + "wasmi_arena", "wasmi_core", + "wasmparser-nostd", ] [[package]] -name = "wasmi-validation" -version = "0.5.0" +name = "wasmi_arena" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" -dependencies = [ - "parity-wasm", -] +checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" [[package]] name = "wasmi_core" -version = "0.2.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" +checksum = "dcf1a7db34bff95b85c261002720c00c3a6168256dcb93041d3fa2054d19856a" dependencies = [ "downcast-rs", - "libm 0.2.7", - "memory_units", - "num-rational", + "libm", "num-traits", + "paste", ] [[package]] @@ -14465,6 +15282,15 @@ dependencies = [ "url", ] +[[package]] +name = "wasmparser-nostd" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9157cab83003221bfd385833ab587a039f5d6fa7304854042ba358a3b09e0724" +dependencies = [ + "indexmap-nostd", +] + [[package]] name = "wasmtime" version = "8.0.1" @@ -14509,14 +15335,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c86437fa68626fe896e5afc69234bb2b5894949083586535f200385adfd71213" dependencies = [ 
"anyhow", - "base64 0.21.2", + "base64 0.21.4", "bincode", "directories-next", "file-per-thread-logger", "log", "rustix 0.36.15", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "toml 0.5.11", "windows-sys 0.45.0", "zstd 0.11.2+zstd.1.5.2", @@ -14534,7 +15360,7 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli", + "gimli 0.27.3", "log", "object 0.30.4", "target-lexicon", @@ -14553,7 +15379,7 @@ dependencies = [ "anyhow", "cranelift-codegen", "cranelift-native", - "gimli", + "gimli 0.27.3", "object 0.30.4", "target-lexicon", "wasmtime-environ", @@ -14567,7 +15393,7 @@ checksum = "a990198cee4197423045235bf89d3359e69bd2ea031005f4c2d901125955c949" dependencies = [ "anyhow", "cranelift-entity", - "gimli", + "gimli 0.27.3", "indexmap 1.9.3", "log", "object 0.30.4", @@ -14589,7 +15415,7 @@ dependencies = [ "bincode", "cfg-if", "cpp_demangle", - "gimli", + "gimli 0.27.3", "log", "object 0.30.4", "rustc-demangle", @@ -14676,17 +15502,17 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -14696,9 +15522,24 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki 0.22.0", + "webpki 0.22.2", +] + +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki 0.100.3", ] +[[package]] +name = "webpki-roots" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" + [[package]] name = "webrtc" version = "0.6.0" @@ -14715,17 +15556,17 @@ dependencies = [ "rand 0.8.5", "rcgen 0.9.3", "regex", - "ring", + "ring 0.16.20", "rtcp", "rtp", "rustls 0.19.1", "sdp", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", "stun", "thiserror", - "time 0.3.25", + "time", "tokio", "turn", "url", @@ -14757,12 +15598,12 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" +checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.2", + "aes-gcm 0.10.3", "async-trait", "bincode", "block-modes", @@ -14774,25 +15615,24 @@ dependencies = [ "hkdf", "hmac 0.12.1", "log", - "oid-registry 0.6.1", "p256", "p384", "rand 0.8.5", "rand_core 0.6.4", - "rcgen 0.9.3", - "ring", + "rcgen 0.10.0", + "ring 0.16.20", "rustls 0.19.1", "sec1 0.3.0", "serde", "sha1", - "sha2 0.10.7", + "sha2 0.10.8", "signature 1.6.4", "subtle", "thiserror", "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0", "x509-parser 0.13.2", ] @@ -14910,9 +15750,10 @@ dependencies = [ [[package]] name = "westend-runtime" 
-version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ + "binary-merkle-tree", "bitvec", "frame-benchmarking", "frame-election-provider-support", @@ -14929,6 +15770,8 @@ dependencies = [ "pallet-babe", "pallet-bags-list", "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", "pallet-collective", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -14936,11 +15779,12 @@ dependencies = [ "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-grandpa", - "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43)", + "pallet-identity 4.0.0-dev (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0)", "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", + "pallet-mmr", "pallet-multisig", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", @@ -14968,7 +15812,7 @@ dependencies = [ "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", - "polkadot-parachain", + "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -14978,6 +15822,7 @@ dependencies = [ "serde_derive", "smallvec", "sp-api", + "sp-application-crypto", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", @@ -14992,19 +15837,20 @@ dependencies = [ "sp-session", "sp-staking", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "substrate-wasm-builder", "westend-runtime-constants", - "xcm", - "xcm-builder", - "xcm-executor", ] [[package]] name = "westend-runtime-constants" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "frame-support", "polkadot-primitives", @@ -15017,20 +15863,21 @@ dependencies = [ [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix 0.38.15", ] [[package]] name = "wide" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa469ffa65ef7e0ba0f164183697b89b854253fd31aeb92358b7b6155177d62f" +checksum = "ebecebefc38ff1860b4bc47550bbfa63af5746061cf0d29fcd7fa63171602598" dependencies = [ "bytemuck", "safe_arch", @@ -15060,9 +15907,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -15092,7 +15939,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -15110,7 +15957,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -15130,17 +15977,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1eeca1c172a285ee6c2c84c341ccea837e7c01b12fbb2d0fe3c9e550ce49ec8" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.2", - "windows_aarch64_msvc 0.48.2", - "windows_i686_gnu 0.48.2", - "windows_i686_msvc 0.48.2", - "windows_x86_64_gnu 0.48.2", - "windows_x86_64_gnullvm 0.48.2", - "windows_x86_64_msvc 0.48.2", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -15151,9 +15998,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10d0c968ba7f6166195e13d593af609ec2e3d24f916f081690695cf5eaffb2f" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -15169,9 +16016,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571d8d4e62f26d4932099a9efe89660e8bd5087775a2ab5cdd8b747b811f1058" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -15187,9 +16034,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2229ad223e178db5fbbc8bd8d3835e51e566b8474bfca58d2e6150c48bb723cd" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -15205,9 +16052,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600956e2d840c194eedfc5d18f8242bc2e17c7775b6684488af3a9fff6fe3287" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -15223,9 +16070,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea99ff3f8b49fb7a8e0d305e5aec485bd068c2ba691b6e277d29eaeac945868a" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -15235,9 +16082,9 @@ checksum = 
"26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1a05a1ece9a7a0d5a7ccf30ba2c33e3a61a30e042ffd247567d1de1d94120d" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -15253,15 +16100,15 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419259aba16b663966e29e6d7c6ecfa0bb8425818bb96f6f1f3c3eb71a6e7b9" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.11" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e461589e194280efaa97236b73623445efa195aa633fd7004f39805707a9d53" +checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" dependencies = [ "memchr", ] @@ -15298,12 +16145,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", + "serde", "zeroize", ] @@ -15320,10 +16168,10 @@ dependencies = [ "lazy_static", "nom", "oid-registry 0.4.0", - "ring", + "ring 0.16.20", "rusticata-macros", "thiserror", - "time 0.3.25", + "time", ] [[package]] @@ -15341,76 +16189,18 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.25", -] - -[[package]] -name = "xcm" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" -dependencies = [ - "bounded-collections", - "derivative", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-weights", - "xcm-procedural", -] - -[[package]] -name = "xcm-builder" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "log", - "pallet-transaction-payment", - "parity-scale-codec", - "polkadot-parachain", - "scale-info", - "sp-arithmetic", - "sp-io", - "sp-runtime", - "sp-std", - "sp-weights", - "xcm", - "xcm-executor", -] - -[[package]] -name = "xcm-executor" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" -dependencies = [ - "environmental", - "frame-benchmarking", - "frame-support", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-weights", - "xcm", + "time", ] [[package]] name = "xcm-procedural" -version = "0.9.43" -source = "git+https://github.com/paritytech/polkadot?branch=release-v0.9.43#ba42b9ce51d25bdaf52d2c61e0763a6e3da50d25" +version = "1.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.1.0#f60318f68687e601c47de5ad5ca88e2c3f8139a7" dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.28", + 
"syn 2.0.37", ] [[package]] @@ -15433,7 +16223,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.25", + "time", ] [[package]] @@ -15453,7 +16243,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] From c31888337da04a7441bbedf7e41aeaf486bcc760 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:15 +0200 Subject: [PATCH 093/143] refactor: move rpc to node --- node/rpc/CHANGELOG.md | 15 -- node/rpc/Cargo.toml | 48 ------ node/rpc/src/lib.rs | 339 ------------------------------------------ 3 files changed, 402 deletions(-) delete mode 100644 node/rpc/CHANGELOG.md delete mode 100644 node/rpc/Cargo.toml delete mode 100644 node/rpc/src/lib.rs diff --git a/node/rpc/CHANGELOG.md b/node/rpc/CHANGELOG.md deleted file mode 100644 index 3bdb41378c..0000000000 --- a/node/rpc/CHANGELOG.md +++ /dev/null @@ -1,15 +0,0 @@ - -## [v0.1.2] 2022-09-08 - -### Added -- Support RPC for `AppPromotion` pallet. - -## [v0.1.1] 2022-08-16 - -### Other changes - -- build: Upgrade polkadot to v0.9.27 2c498572636f2b34d53b1c51b7283a761a7dc90a - -- build: Upgrade polkadot to v0.9.26 85515e54c4ca1b82a2630034e55dcc804c643bf8 - -- build: Upgrade polkadot to v0.9.25 cdfb9bdc7b205ff1b5134f034ef9973d769e5e6b \ No newline at end of file diff --git a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml deleted file mode 100644 index 02cf00e03e..0000000000 --- a/node/rpc/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -authors = ['Unique Network '] -description = "Unique chain rpc" -edition = "2021" -license = 'GPLv3' -name = "unique-rpc" -version = "0.1.2" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -jsonrpsee = { workspace = true } -# pallet-contracts-rpc = { git = 'https://github.com/paritytech/substrate', branch = 'master' } -pallet-transaction-payment-rpc = { workspace = true } -sc-client-api = { workspace = true } -sc-network = { workspace = true } -sc-network-sync = { workspace = true } -sc-rpc = { workspace = true } -sc-rpc-api = { workspace = true } -sc-service = { workspace = true } -sc-transaction-pool = { workspace = true } -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-blockchain = { workspace = true } -sp-runtime = { workspace = true } -substrate-frame-rpc-system = { workspace = true } - -fc-db = { workspace = true } -fc-mapping-sync = { workspace = true } -fc-rpc = { workspace = true } -fc-rpc-core = { workspace = true } -fp-rpc = { workspace = true } -fp-storage = { workspace = true } - -app-promotion-rpc = { workspace = true } -pallet-ethereum.workspace = true -serde = { workspace = true } -uc-rpc = { workspace = true } -up-common = { workspace = true } -up-data-structs = { workspace = true } -up-pov-estimate-rpc = { workspace = true, default-features = true } -up-rpc = { workspace = true } - -[features] -default = [] -pov-estimate = ['uc-rpc/pov-estimate'] -std = [] diff --git a/node/rpc/src/lib.rs b/node/rpc/src/lib.rs deleted file mode 100644 index c544c8f57f..0000000000 --- a/node/rpc/src/lib.rs +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. 
- -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -use fc_mapping_sync::{EthereumBlockNotificationSinks, EthereumBlockNotification}; -use sp_runtime::traits::BlakeTwo256; -use fc_rpc::{ - EthBlockDataCacheTask, OverrideHandle, RuntimeApiStorageOverride, SchemaV1Override, - StorageOverride, SchemaV2Override, SchemaV3Override, -}; -use jsonrpsee::RpcModule; -use fc_rpc_core::types::{FilterPool, FeeHistoryCache}; -use fp_storage::EthereumStorageSchema; -use sc_client_api::{ - backend::{AuxStore, StorageProvider}, - client::BlockchainEvents, - StateBackend, Backend, -}; -use sc_network::NetworkService; -use sc_network_sync::SyncingService; -use sc_rpc::SubscriptionTaskExecutor; -pub use sc_rpc_api::DenyUnsafe; -use sc_transaction_pool::{ChainApi, Pool}; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; -use sc_service::TransactionPool; -use std::{collections::BTreeMap, sync::Arc}; - -use up_common::types::opaque::*; - -#[cfg(feature = "pov-estimate")] -type FullBackend = sc_service::TFullBackend; - -/// Full client dependencies. -pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc
<P>
, - /// The SelectChain Strategy - pub select_chain: SC, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, - - /// Runtime identification (read from the chain spec) - pub runtime_id: RuntimeId, - /// Executor params for PoV estimating - #[cfg(feature = "pov-estimate")] - pub exec_params: uc_rpc::pov_estimate::ExecutorParams, - /// Substrate Backend. - #[cfg(feature = "pov-estimate")] - pub backend: Arc, -} - -pub fn overrides_handle(client: Arc) -> Arc> -where - C: ProvideRuntimeApi + StorageProvider + AuxStore, - C: HeaderBackend + HeaderMetadata, - C: Send + Sync + 'static, - C::Api: fp_rpc::EthereumRuntimeRPCApi, - C::Api: up_rpc::UniqueApi::CrossAccountId, AccountId>, - BE: Backend + 'static, - BE::State: StateBackend, - R: RuntimeInstance + Send + Sync + 'static, -{ - let mut overrides_map = BTreeMap::new(); - overrides_map.insert( - EthereumStorageSchema::V1, - Box::new(SchemaV1Override::new(client.clone())) as Box + 'static>, - ); - overrides_map.insert( - EthereumStorageSchema::V2, - Box::new(SchemaV2Override::new(client.clone())) as Box + 'static>, - ); - overrides_map.insert( - EthereumStorageSchema::V3, - Box::new(SchemaV3Override::new(client.clone())) as Box + 'static>, - ); - - Arc::new(OverrideHandle { - schemas: overrides_map, - fallback: Box::new(RuntimeApiStorageOverride::new(client)), - }) -} - -/// Instantiate all Full RPC extensions. -pub fn create_full( - io: &mut RpcModule<()>, - deps: FullDeps, -) -> Result<(), Box> -where - C: ProvideRuntimeApi + StorageProvider + AuxStore, - C: HeaderBackend + HeaderMetadata + 'static, - C: Send + Sync + 'static, - C: BlockchainEvents, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: BlockBuilder, - // C::Api: pallet_contracts_rpc::ContractsRuntimeApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: up_rpc::UniqueApi::CrossAccountId, AccountId>, - C::Api: app_promotion_rpc::AppPromotionApi< - Block, - BlockNumber, - ::CrossAccountId, - AccountId, - >, - C::Api: up_pov_estimate_rpc::PovEstimateApi, - B: sc_client_api::Backend + Send + Sync + 'static, - B::State: sc_client_api::backend::StateBackend>, - P: TransactionPool + 'static, - R: RuntimeInstance + Send + Sync + 'static, - ::CrossAccountId: serde::Serialize, - C: sp_api::CallApiAt< - sp_runtime::generic::Block< - sp_runtime::generic::Header, - sp_runtime::OpaqueExtrinsic, - >, - >, - for<'de> ::CrossAccountId: serde::Deserialize<'de>, -{ - use uc_rpc::{UniqueApiServer, Unique}; - - use uc_rpc::{AppPromotionApiServer, AppPromotion}; - - #[cfg(feature = "pov-estimate")] - use uc_rpc::pov_estimate::{PovEstimateApiServer, PovEstimate}; - - // use pallet_contracts_rpc::{Contracts, ContractsApi}; - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - - let FullDeps { - client, - pool, - select_chain: _, - deny_unsafe, - - runtime_id: _, - - #[cfg(feature = "pov-estimate")] - exec_params, - - #[cfg(feature = "pov-estimate")] - backend, - } = deps; - - io.merge(System::new(Arc::clone(&client), Arc::clone(&pool), deny_unsafe).into_rpc())?; - io.merge(TransactionPayment::new(Arc::clone(&client)).into_rpc())?; - - io.merge(Unique::new(client.clone()).into_rpc())?; - - io.merge(AppPromotion::new(client).into_rpc())?; - - #[cfg(feature = "pov-estimate")] - io.merge( - PovEstimate::new( - client.clone(), - backend, - deny_unsafe, - exec_params, - runtime_id, - ) - .into_rpc(), - )?; - - Ok(()) -} - -pub struct EthDeps { - /// The 
client instance to use. - pub client: Arc<C>, - /// Transaction pool instance. - pub pool: Arc<P>
, - /// Graph pool instance. - pub graph: Arc>, - /// Syncing service - pub sync: Arc>, - /// The Node authority flag - pub is_authority: bool, - /// Network service - pub network: Arc>, - - /// Ethereum Backend. - pub eth_backend: Arc + Send + Sync>, - /// Maximum number of logs in a query. - pub max_past_logs: u32, - /// Maximum fee history cache size. - pub fee_history_limit: u64, - /// Fee history cache. - pub fee_history_cache: FeeHistoryCache, - pub eth_block_data_cache: Arc>, - /// EthFilterApi pool. - pub eth_filter_pool: Option, - pub eth_pubsub_notification_sinks: - Arc>>, - /// Whether to enable eth dev signer - pub enable_dev_signer: bool, - - pub overrides: Arc>, -} - -/// This converter is never used, but we have a generic -/// Option, where T should implement ConvertTransaction -/// -/// TODO: remove after never-type (`!`) stabilization -enum NeverConvert {} -impl fp_rpc::ConvertTransaction for NeverConvert { - fn convert_transaction(&self, _transaction: pallet_ethereum::Transaction) -> T { - unreachable!() - } -} - -pub fn create_eth( - io: &mut RpcModule<()>, - deps: EthDeps, - subscription_task_executor: SubscriptionTaskExecutor, -) -> Result<(), Box> -where - C: ProvideRuntimeApi + StorageProvider + AuxStore, - C: HeaderBackend + HeaderMetadata + 'static, - C: Send + Sync + 'static, - C: BlockchainEvents, - C::Api: BlockBuilder, - C::Api: fp_rpc::EthereumRuntimeRPCApi, - C::Api: fp_rpc::ConvertTransactionRuntimeApi, - P: TransactionPool + 'static, - CA: ChainApi + 'static, - B: sc_client_api::Backend + Send + Sync + 'static, - C: sp_api::CallApiAt< - sp_runtime::generic::Block< - sp_runtime::generic::Header, - sp_runtime::OpaqueExtrinsic, - >, - >, -{ - use fc_rpc::{ - Eth, EthApiServer, EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, - EthPubSubApiServer, EthSigner, Net, NetApiServer, Web3, Web3ApiServer, TxPool, - TxPoolApiServer, - }; - - let EthDeps { - client, - pool, - graph, - eth_backend, - max_past_logs, - fee_history_limit, - fee_history_cache, - eth_block_data_cache, - eth_filter_pool, - eth_pubsub_notification_sinks, - enable_dev_signer, - sync, - is_authority, - network, - overrides, - } = deps; - - let mut signers = Vec::new(); - if enable_dev_signer { - signers.push(Box::new(EthDevSigner::new()) as Box); - } - let execute_gas_limit_multiplier = 10; - io.merge( - Eth::new( - client.clone(), - pool.clone(), - graph.clone(), - // We have no runtimes old enough to only accept converted transactions - None::, - sync.clone(), - signers, - overrides.clone(), - eth_backend.clone(), - is_authority, - eth_block_data_cache.clone(), - fee_history_cache, - fee_history_limit, - execute_gas_limit_multiplier, - None, - ) - .into_rpc(), - )?; - - let tx_pool = TxPool::new(client.clone(), graph); - - if let Some(filter_pool) = eth_filter_pool { - io.merge( - EthFilter::new( - client.clone(), - eth_backend, - tx_pool.clone(), - filter_pool, - 500_usize, // max stored filters - max_past_logs, - eth_block_data_cache, - ) - .into_rpc(), - )?; - } - io.merge( - Net::new( - client.clone(), - network, - // Whether to format the `peer_count` response as Hex (default) or not. 
- true, - ) - .into_rpc(), - )?; - io.merge(Web3::new(client.clone()).into_rpc())?; - io.merge( - EthPubSub::new( - pool, - client, - sync, - subscription_task_executor, - overrides, - eth_pubsub_notification_sinks, - ) - .into_rpc(), - )?; - io.merge(tx_pool.into_rpc())?; - - Ok(()) -} From a64769ff6bff476597daecb73268349426e161d7 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:25 +0200 Subject: [PATCH 094/143] refactor: upgrade code for new substrate --- node/cli/src/chain_spec.rs | 30 +- node/cli/src/command.rs | 59 +-- node/cli/src/service.rs | 480 +++++++----------- pallets/app-promotion/src/benchmarking.rs | 4 +- pallets/app-promotion/src/lib.rs | 38 +- pallets/app-promotion/src/types.rs | 4 +- .../collator-selection/src/benchmarking.rs | 8 +- pallets/collator-selection/src/lib.rs | 36 +- pallets/collator-selection/src/mock.rs | 25 +- pallets/common/src/lib.rs | 3 +- pallets/configuration/src/benchmarking.rs | 4 +- pallets/configuration/src/lib.rs | 17 +- pallets/evm-contract-helpers/src/eth.rs | 2 +- pallets/evm-contract-helpers/src/lib.rs | 8 +- pallets/evm-migration/src/benchmarking.rs | 4 +- pallets/foreign-assets/src/impl_fungibles.rs | 86 ++-- pallets/foreign-assets/src/lib.rs | 43 +- pallets/identity/src/tests.rs | 23 +- pallets/identity/src/types.rs | 16 +- pallets/inflation/src/lib.rs | 27 +- pallets/inflation/src/tests.rs | 25 +- pallets/nonfungible/src/lib.rs | 45 +- pallets/unique/src/lib.rs | 16 +- primitives/common/src/constants.rs | 12 +- primitives/common/src/types.rs | 4 +- primitives/data-structs/src/bondrewd_codec.rs | 8 +- primitives/data-structs/src/bounded.rs | 22 +- primitives/data-structs/src/lib.rs | 198 +++++--- primitives/rpc/src/lib.rs | 4 +- .../config/pallets/collator_selection.rs | 2 +- runtime/common/config/substrate.rs | 11 +- runtime/common/config/xcm/foreignassets.rs | 66 +-- runtime/common/config/xcm/mod.rs | 7 +- runtime/common/config/xcm/nativeassets.rs | 7 +- runtime/common/construct_runtime.rs | 6 +- runtime/common/mod.rs | 19 +- runtime/common/sponsoring.rs | 14 +- runtime/common/tests/mod.rs | 14 +- runtime/tests/src/lib.rs | 32 +- 39 files changed, 682 insertions(+), 747 deletions(-) diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index 92a75f11e4..5d5e0c4fd3 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -153,11 +153,12 @@ macro_rules! testnet_genesis { ) => {{ use $runtime::*; - GenesisConfig { + RuntimeGenesisConfig { system: SystemConfig { code: WASM_BINARY .expect("WASM binary was not build, please build it!") .to_vec(), + ..Default::default() }, balances: BalancesConfig { balances: $endowed_accounts @@ -167,10 +168,6 @@ macro_rules! testnet_genesis { .map(|k| (k, 1 << 100)) .collect(), }, - common: Default::default(), - configuration: Default::default(), - nonfungible: Default::default(), - treasury: Default::default(), tokens: TokensConfig { balances: vec![] }, sudo: SudoConfig { key: Some($root_key), @@ -179,8 +176,8 @@ macro_rules! testnet_genesis { vesting: VestingConfig { vesting: vec![] }, parachain_info: ParachainInfoConfig { parachain_id: $id.into(), + ..Default::default() }, - parachain_system: Default::default(), collator_selection: CollatorSelectionConfig { invulnerables: $initial_invulnerables .iter() @@ -200,14 +197,10 @@ macro_rules! 
testnet_genesis { }) .collect(), }, - aura: Default::default(), - aura_ext: Default::default(), evm: EVMConfig { accounts: BTreeMap::new(), + ..Default::default() }, - ethereum: EthereumConfig {}, - polkadot_xcm: Default::default(), - transaction_payment: Default::default(), ..Default::default() } }}; @@ -224,15 +217,13 @@ macro_rules! testnet_genesis { ) => {{ use $runtime::*; - GenesisConfig { + RuntimeGenesisConfig { system: SystemConfig { code: WASM_BINARY .expect("WASM binary was not build, please build it!") .to_vec(), + ..Default::default() }, - common: Default::default(), - configuration: Default::default(), - nonfungible: Default::default(), balances: BalancesConfig { balances: $endowed_accounts .iter() @@ -241,7 +232,6 @@ macro_rules! testnet_genesis { .map(|k| (k, 1 << 100)) .collect(), }, - treasury: Default::default(), tokens: TokensConfig { balances: vec![] }, sudo: SudoConfig { key: Some($root_key), @@ -249,21 +239,19 @@ macro_rules! testnet_genesis { vesting: VestingConfig { vesting: vec![] }, parachain_info: ParachainInfoConfig { parachain_id: $id.into(), + Default::default() }, - parachain_system: Default::default(), aura: AuraConfig { authorities: $initial_invulnerables .into_iter() .map(|(_, aura)| aura) .collect(), }, - aura_ext: Default::default(), evm: EVMConfig { accounts: BTreeMap::new(), + ..Default::default() }, - ethereum: EthereumConfig {}, - polkadot_xcm: Default::default(), - transaction_payment: Default::default(), + ..Default::default() } }}; } diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 3db301a27a..041331aa76 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -135,19 +135,6 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> std::result::Result, String> { load_spec(id) } - - fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion { - match chain_spec.runtime_id() { - #[cfg(feature = "unique-runtime")] - RuntimeId::Unique => &unique_runtime::VERSION, - - #[cfg(feature = "quartz-runtime")] - RuntimeId::Quartz => &quartz_runtime::VERSION, - - RuntimeId::Opal => &opal_runtime::VERSION, - runtime_id => panic!("{}", no_runtime_err!(runtime_id)), - } - } } impl SubstrateCli for RelayChainCli { @@ -185,24 +172,20 @@ impl SubstrateCli for RelayChainCli { fn load_spec(&self, id: &str) -> std::result::Result, String> { polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id) } - - fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion { - polkadot_cli::Cli::native_runtime_version(chain_spec) - } } macro_rules! async_run_with_runtime { ( - $runtime_api:path, $executor:path, + $runtime:path, $runtime_api:path, $executor:path, $runner:ident, $components:ident, $cli:ident, $cmd:ident, $config:ident, $( $code:tt )* ) => { $runner.async_run(|$config| { let $components = new_partial::< - $runtime_api, $executor, _ + $runtime, $runtime_api, $executor, _ >( &$config, - crate::service::parachain_build_import_queue, + crate::service::parachain_build_import_queue::<$runtime, _, _>, )?; let task_manager = $components.task_manager; @@ -218,18 +201,18 @@ macro_rules! 
construct_async_run { match runner.config().chain_spec.runtime_id() { #[cfg(feature = "unique-runtime")] RuntimeId::Unique => async_run_with_runtime!( - unique_runtime::RuntimeApi, UniqueRuntimeExecutor, + unique_runtime::Runtime, unique_runtime::RuntimeApi, UniqueRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), #[cfg(feature = "quartz-runtime")] RuntimeId::Quartz => async_run_with_runtime!( - quartz_runtime::RuntimeApi, QuartzRuntimeExecutor, + quartz_runtime::Runtime, quartz_runtime::RuntimeApi, QuartzRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), RuntimeId::Opal => async_run_with_runtime!( - opal_runtime::RuntimeApi, OpalRuntimeExecutor, + opal_runtime::Runtime, opal_runtime::RuntimeApi, OpalRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), @@ -240,11 +223,18 @@ macro_rules! construct_async_run { macro_rules! sync_run_with_runtime { ( - $runtime_api:path, $executor:path, + $runtime:path, $runtime_api:path, $executor:path, $runner:ident, $components:ident, $cli:ident, $cmd:ident, $config:ident, $( $code:tt )* ) => { $runner.sync_run(|$config| { + let $components = new_partial::< + $runtime, $runtime_api, $executor, _ + >( + &$config, + crate::service::parachain_build_import_queue::<$runtime, _, _>, + )?; + $( $code )* }) }; @@ -257,18 +247,18 @@ macro_rules! construct_sync_run { match runner.config().chain_spec.runtime_id() { #[cfg(feature = "unique-runtime")] RuntimeId::Unique => sync_run_with_runtime!( - unique_runtime::RuntimeApi, UniqueRuntimeExecutor, + unique_runtime::Runtime, unique_runtime::RuntimeApi, UniqueRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), #[cfg(feature = "quartz-runtime")] RuntimeId::Quartz => sync_run_with_runtime!( - quartz_runtime::RuntimeApi, QuartzRuntimeExecutor, + quartz_runtime::Runtime, quartz_runtime::RuntimeApi, QuartzRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), RuntimeId::Opal => sync_run_with_runtime!( - opal_runtime::RuntimeApi, OpalRuntimeExecutor, + opal_runtime::Runtime, opal_runtime::RuntimeApi, OpalRuntimeExecutor, runner, $components, $cli, $cmd, $config, $( $code )* ), @@ -362,12 +352,11 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { construct_sync_run!(|components, cli, cmd, _config| { let spec = cli.load_spec(&cmd.shared_params.chain.clone().unwrap_or_default())?; - let state_version = Cli::native_runtime_version(&spec).state_version(); - cmd.run::(&*spec, state_version) + cmd.run(&*spec, &*components.client) }) } Some(Subcommand::ExportGenesisWasm(cmd)) => { - construct_sync_run!(|components, cli, cmd, _config| { + construct_sync_run!(|_components, cli, cmd, _config| { let spec = cli.load_spec(&cmd.shared_params.chain.clone().unwrap_or_default())?; cmd.run(&*spec) }) @@ -411,6 +400,7 @@ pub fn run() -> Result<()> { #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { use std::{future::Future, pin::Pin}; + use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; use try_runtime_cli::block_building_info::timestamp_with_aura_info; @@ -507,12 +497,6 @@ pub fn run() -> Result<()> { ¶_id, ); - let state_version = Cli::native_runtime_version(&config.chain_spec).state_version(); - let block: Block = generate_genesis_block(&*config.chain_spec, state_version) - .map_err(|e| format!("{e:?}"))?; - let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode())); - let genesis_hash = format!("0x{:?}", 
HexDisplay::from(&block.header().hash().0)); - let polkadot_config = SubstrateCli::create_configuration( &polkadot_cli, &polkadot_cli, @@ -522,9 +506,6 @@ pub fn run() -> Result<()> { info!("Parachain id: {:?}", para_id); info!("Parachain Account: {}", parachain_account); - info!("Parachain genesis state: {}", genesis_state); - info!("Parachain genesis hash: {}", genesis_hash); - debug!("Parachain genesis block: {:?}", block); info!( "Is collating: {}", if config.role.is_authority() { diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index b50c6e2fcb..f48f011416 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -185,7 +185,7 @@ impl Stream for AutosealInterval { } } -pub fn open_frontier_backend>( +pub fn open_frontier_backend>( client: Arc, config: &Configuration, ) -> Result>, String> { @@ -210,12 +210,42 @@ type FullSelectChain = sc_consensus::LongestChain; type ParachainBlockImport = TParachainBlockImport>, FullBackend>; +/// Generate a supertrait based on bounds, and blanket impl for it. +macro_rules! ez_bounds { + ($vis:vis trait $name:ident$(<$($gen:ident $(: $($(+)? $bound:path)*)?),* $(,)?>)? $(:)? $($(+)? $super:path)* {}) => { + $vis trait $name $(<$($gen $(: $($bound+)*)?,)*>)?: $($super +)* {} + impl $name$(<$($gen,)*>)? for T + where T: $($super +)* {} + } +} +ez_bounds!( + pub trait RuntimeApiDep: + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_consensus_aura::AuraApi + + fp_rpc::EthereumRuntimeRPCApi + + sp_session::SessionKeys + + sp_block_builder::BlockBuilder + + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + + sp_api::ApiExt + + up_rpc::UniqueApi + + app_promotion_rpc::AppPromotionApi + + up_pov_estimate_rpc::PovEstimateApi + + substrate_frame_rpc_system::AccountNonceApi + + sp_api::Metadata + + sp_offchain::OffchainWorkerApi + + cumulus_primitives_core::CollectCollationInfo + // Deprecated, not used. + + fp_rpc::ConvertTransactionRuntimeApi + { + } +); + /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. 
#[allow(clippy::type_complexity)] -pub fn new_partial( +pub fn new_partial( config: &Configuration, build_import_queue: BIQ, ) -> Result< @@ -223,7 +253,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sc_consensus::DefaultImportQueue>, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool>, OtherPartial, >, @@ -235,7 +265,8 @@ where + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue, + RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + Runtime: RuntimeInstance, ExecutorDispatch: NativeExecutionDispatch + 'static, BIQ: FnOnce( Arc>, @@ -243,10 +274,7 @@ where &Configuration, Option, &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue>, - sc_service::Error, - >, + ) -> Result, sc_service::Error>, { let telemetry = config .telemetry_endpoints @@ -319,35 +347,6 @@ where Ok(params) } -async fn build_relay_chain_interface( - polkadot_config: Configuration, - parachain_config: &Configuration, - telemetry_worker_handle: Option, - task_manager: &mut TaskManager, - collator_options: CollatorOptions, - hwbench: Option, -) -> RelayChainResult<( - Arc<(dyn RelayChainInterface + 'static)>, - Option, -)> { - if collator_options.relay_chain_rpc_urls.is_empty() { - build_inprocess_relay_chain( - polkadot_config, - parachain_config, - telemetry_worker_handle, - task_manager, - hwbench, - ) - } else { - build_minimal_relay_chain_node( - polkadot_config, - task_manager, - collator_options.relay_chain_rpc_urls, - ) - .await - } -} - macro_rules! clone { ($($i:ident),* $(,)?) => { $( @@ -360,13 +359,11 @@ macro_rules! clone { /// /// This is the actual implementation that is abstract over the executor and the runtime api. #[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( +pub async fn start_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, - id: ParaId, - build_import_queue: BIQ, - build_consensus: BIC, + para_id: ParaId, hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc>)> where @@ -378,48 +375,16 @@ where + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + sp_session::SessionKeys - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::ApiExt> - + up_rpc::UniqueApi - + app_promotion_rpc::AppPromotionApi - + up_pov_estimate_rpc::PovEstimateApi - + substrate_frame_rpc_system::AccountNonceApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + cumulus_primitives_core::CollectCollationInfo, + RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + Runtime: RuntimeInstance, ExecutorDispatch: NativeExecutionDispatch + 'static, - BIQ: FnOnce( - Arc>, - Arc, - &Configuration, - Option, - &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue>, - sc_service::Error, - >, - BIC: FnOnce( - Arc>, - Arc, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - bool, - ) -> Result>, sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); - let params = - new_partial::(¶chain_config, build_import_queue)?; + let params = new_partial::( + ¶chain_config, + parachain_build_import_queue, + )?; let OtherPartial { mut telemetry, telemetry_worker_handle, @@ -443,9 +408,9 @@ where .await .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - let 
block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id); + let block_announce_validator = + RequireSecondedInBlockAnnounce::new(relay_chain_interface.clone(), para_id); - let force_authoring = parachain_config.force_authoring; let validator = parachain_config.role.is_authority(); let prometheus_registry = parachain_config.prometheus_registry().cloned(); let transaction_pool = params.transaction_pool.clone(); @@ -531,7 +496,7 @@ where let mut rpc_handle = RpcModule::new(()); - let full_deps = unique_rpc::FullDeps { + let full_deps = FullDeps { client: client.clone(), runtime_id, @@ -551,9 +516,9 @@ where select_chain, }; - unique_rpc::create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_handle, full_deps)?; + create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_handle, full_deps)?; - let eth_deps = unique_rpc::EthDeps { + let eth_deps = EthDeps { client, graph: transaction_pool.pool().clone(), pool: transaction_pool, @@ -571,9 +536,18 @@ where eth_pubsub_notification_sinks, overrides, sync: sync_service.clone(), + pending_create_inherent_data_providers: |_, ()| async move { Ok(()) }, }; - unique_rpc::create_eth( + create_eth::< + _, + _, + _, + _, + _, + _, + DefaultEthConfig>, + >( &mut rpc_handle, eth_deps, subscription_task_executor.clone(), @@ -624,8 +598,25 @@ where .overseer_handle() .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: announce_block.clone(), + para_id, + relay_chain_interface: relay_chain_interface.clone(), + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service: sync_service.clone(), + })?; + if validator { - let parachain_consensus = build_consensus( + start_consensus( client.clone(), backend.clone(), prometheus_registry.as_ref(), @@ -635,42 +626,12 @@ where transaction_pool, sync_service.clone(), params.keystore_container.keystore(), - force_authoring, - )?; - - let spawner = task_manager.spawn_handle(); - - let params = StartCollatorParams { - para_id: id, - block_status: client.clone(), - announce_block, - client: client.clone(), - task_manager: &mut task_manager, - spawner, - parachain_consensus, - import_queue: import_queue_service, - collator_key: collator_key.expect("Command line arguments do not allow this. qed"), - relay_chain_interface, + overseer_handle, relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; - - start_collator(params).await?; - } else { - let params = StartFullNodeParams { - client: client.clone(), + para_id, + collator_key.expect("cli args do not allow this"), announce_block, - task_manager: &mut task_manager, - para_id: id, - import_queue: import_queue_service, - relay_chain_interface, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; - - start_full_node(params)?; + )?; } start_network.start_network(); @@ -679,25 +640,20 @@ where } /// Build the import queue for the the parachain runtime. 
-pub fn parachain_build_import_queue( +pub fn parachain_build_import_queue( client: Arc>, backend: Arc, config: &Configuration, telemetry: Option, task_manager: &TaskManager, -) -> Result< - sc_consensus::DefaultImportQueue>, - sc_service::Error, -> +) -> Result, sc_service::Error> where RuntimeApi: sp_api::ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi - + sp_api::ApiExt>, + RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + Runtime: RuntimeInstance, ExecutorDispatch: NativeExecutionDispatch + 'static, { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; @@ -732,120 +688,81 @@ where .map_err(Into::into) } -/// Start a normal parachain node. -pub async fn start_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +pub fn start_consensus( + client: Arc>, + backend: Arc, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc< + sc_transaction_pool::FullPool>, + >, + sync_oracle: Arc>, + keystore: KeystorePtr, + overseer_handle: OverseerHandle, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + announce_block: Arc>) + Send + Sync>, +) -> Result<(), sc_service::Error> where - Runtime: RuntimeInstance + Send + Sync + 'static, - ::CrossAccountId: Serialize, - for<'de> ::CrossAccountId: Deserialize<'de>, + ExecutorDispatch: NativeExecutionDispatch + 'static, RuntimeApi: sp_api::ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + sp_session::SessionKeys - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::ApiExt> - + up_rpc::UniqueApi - + app_promotion_rpc::AppPromotionApi - + up_pov_estimate_rpc::PovEstimateApi - + substrate_frame_rpc_system::AccountNonceApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi, - ExecutorDispatch: NativeExecutionDispatch + 'static, + RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + Runtime: RuntimeInstance, { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - id, - parachain_build_import_queue, - |client, - backend, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - force_authoring| { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - let block_import = ParachainBlockImport::new(client.clone(), backend); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + let proposer = Proposer::new(proposer_factory); - Ok(AuraConsensus::build::< - 
sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - >(BuildAuraConsensusParams { - proposer_factory, - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = - cumulus_primitives_parachain_inherent::ParachainInherentData::create_at( - relay_parent, - &relay_chain_interface, - &validation_data, - id, - ).await; + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); - let time = sp_timestamp::InherentDataProvider::from_system_time(); + let block_import = ParachainBlockImport::new(client.clone(), backend); - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); + let params = BuildAuraConsensusParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client, + #[cfg(feature = "lookahead")] + para_backend: backend, + para_id, + relay_client: relay_chain_interface, + sync_oracle, + keystore, + slot_duration, + proposer, + collator_service, + // With async-baking, we allowed to be both slower (longer authoring) and faster (multiple para blocks per relay block) + authoring_duration: Duration::from_millis(500), + overseer_handle, + #[cfg(feature = "lookahead")] + code_hash_provider: || {}, + collator_key, + relay_chain_slot_duration, + }; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok((slot, time, parachain_inherent)) - } - }, - block_import, - para_client: client, - backoff_authoring_blocks: Option::<()>::None, - sync_oracle, - keystore, - force_authoring, - slot_duration, - // We got around 500ms for proposing - block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32), - telemetry, - max_block_proposal_slot_portion: None, - })) - }, - hwbench, - ) - .await + task_manager.spawn_essential_handle().spawn( + "aura", + None, + run_aura::<_, AuraAuthorityPair, _, _, _, _, _, _, _>(params), + ); + Ok(()) } fn dev_build_import_queue( @@ -854,17 +771,14 @@ fn dev_build_import_queue( config: &Configuration, _: Option, task_manager: &TaskManager, -) -> Result< - sc_consensus::DefaultImportQueue>, - sc_service::Error, -> +) -> Result, sc_service::Error> where RuntimeApi: sp_api::ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::ApiExt>, + RuntimeApi::RuntimeApi: + sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_api::ApiExt, ExecutorDispatch: NativeExecutionDispatch + 'static, { Ok(sc_consensus_manual_seal::import_queue( @@ -881,6 +795,15 @@ pub struct OtherPartial { pub eth_backend: Arc>, } +struct DefaultEthConfig(PhantomData); +impl EthConfig for DefaultEthConfig +where + C: StorageProvider + Sync + Send + 'static, +{ + type EstimateGasAdapter = (); + type RuntimeStorageOverride = SystemAccountId32StorageOverride; +} + /// Builds a new development service. 
This service uses instant seal, and mocks /// the parachain inherent pub fn start_dev_node( @@ -897,28 +820,14 @@ where + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi - + sp_session::SessionKeys - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::ApiExt> - + up_rpc::UniqueApi - + app_promotion_rpc::AppPromotionApi - + up_pov_estimate_rpc::PovEstimateApi - + substrate_frame_rpc_system::AccountNonceApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi, + RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, ExecutorDispatch: NativeExecutionDispatch + 'static, { + use fc_consensus::FrontierBlockImport; use sc_consensus_manual_seal::{ run_manual_seal, run_delayed_finalize, EngineCommand, ManualSealParams, DelayedFinalizeParams, }; - use fc_consensus::FrontierBlockImport; let sc_service::PartialComponents { client, @@ -935,7 +844,7 @@ where eth_backend, telemetry_worker_handle: _, }, - } = new_partial::( + } = new_partial::( &config, dev_build_import_queue::, )?; @@ -954,15 +863,6 @@ where warp_sync_params: None, })?; - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - let collator = config.role.is_authority(); let select_chain = maybe_select_chain; @@ -1141,7 +1041,7 @@ where let mut rpc_module = RpcModule::new(()); - let full_deps = unique_rpc::FullDeps { + let full_deps = FullDeps { runtime_id, #[cfg(feature = "pov-estimate")] @@ -1161,9 +1061,9 @@ where select_chain, }; - unique_rpc::create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_module, full_deps)?; + create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_module, full_deps)?; - let eth_deps = unique_rpc::EthDeps { + let eth_deps = EthDeps { client, graph: transaction_pool.pool().clone(), pool: transaction_pool, @@ -1181,9 +1081,19 @@ where eth_pubsub_notification_sinks, overrides, sync: sync_service.clone(), + // We don't have any inherents except parachain built-ins, which we can't even extract from inside `run_aura`. 
+ pending_create_inherent_data_providers: |_, ()| async move { Ok(()) }, }; - unique_rpc::create_eth( + create_eth::< + _, + _, + _, + _, + _, + _, + DefaultEthConfig>, + >( &mut rpc_module, eth_deps, subscription_task_executor.clone(), @@ -1241,37 +1151,35 @@ where }) } -pub struct FrontierTaskParams<'a, B: BlockT, C, BE> { +pub struct FrontierTaskParams<'a, C, B> { pub task_manager: &'a TaskManager, pub client: Arc, - pub substrate_backend: Arc, - pub eth_backend: Arc>, + pub substrate_backend: Arc, + pub eth_backend: Arc>, pub eth_filter_pool: Option, - pub overrides: Arc>, + pub overrides: Arc>, pub fee_history_limit: u64, pub fee_history_cache: FeeHistoryCache, pub sync_strategy: SyncStrategy, pub prometheus_registry: Option, } -pub fn spawn_frontier_tasks( - params: FrontierTaskParams, - sync: Arc>, +pub fn spawn_frontier_tasks( + params: FrontierTaskParams, + sync: Arc>, pubsub_notification_sinks: Arc< - EthereumBlockNotificationSinks>, + EthereumBlockNotificationSinks>, >, -) -> Arc> +) -> Arc> where - C: ProvideRuntimeApi + BlockOf, - C: HeaderBackend + HeaderMetadata + 'static, - C: BlockchainEvents + StorageProvider, + C: ProvideRuntimeApi + BlockOf, + C: HeaderBackend + HeaderMetadata + 'static, + C: BlockchainEvents + StorageProvider, C: Send + Sync + 'static, - C::Api: EthereumRuntimeRPCApi, - C::Api: BlockBuilder, - B: BlockT + Send + Sync + 'static, - B::Header: HeaderT, - BE: Backend + 'static, - BE::State: StateBackend, + C::Api: EthereumRuntimeRPCApi, + C::Api: BlockBuilder, + B: Backend + 'static, + B::State: StateBackend, { let FrontierTaskParams { task_manager, diff --git a/pallets/app-promotion/src/benchmarking.rs b/pallets/app-promotion/src/benchmarking.rs index 9acd558ec9..bac9c0ce59 100644 --- a/pallets/app-promotion/src/benchmarking.rs +++ b/pallets/app-promotion/src/benchmarking.rs @@ -32,7 +32,7 @@ const SEED: u32 = 0; fn set_admin() -> Result where T: Config + pallet_unique::Config + pallet_evm_migration::Config, - T::BlockNumber: From + Into, + BlockNumberFor: From + Into, BalanceOf: Sum + From, { let pallet_admin = account::("admin", 0, SEED); @@ -53,7 +53,7 @@ where benchmarks! { where_clause{ where T: Config + pallet_unique::Config + pallet_evm_migration::Config , - T::BlockNumber: From + Into, + BlockNumberFor: From + Into, BalanceOf: Sum + From } diff --git a/pallets/app-promotion/src/lib.rs b/pallets/app-promotion/src/lib.rs index b4920ad96f..d4a633e59f 100644 --- a/pallets/app-promotion/src/lib.rs +++ b/pallets/app-promotion/src/lib.rs @@ -125,11 +125,11 @@ pub mod pallet { /// In relay blocks. #[pallet::constant] - type RecalculationInterval: Get; + type RecalculationInterval: Get>; /// In parachain blocks. #[pallet::constant] - type PendingInterval: Get; + type PendingInterval: Get>; /// Rate of return for interval in blocks defined in `RecalculationInterval`. #[pallet::constant] @@ -146,7 +146,7 @@ pub mod pallet { type WeightInfo: WeightInfo; // The relay block number provider - type RelayBlockNumberProvider: BlockNumberProvider; + type RelayBlockNumberProvider: BlockNumberProvider>; /// Events compatible with [`frame_system::Config::Event`]. 
type RuntimeEvent: IsType<::RuntimeEvent> + From>; @@ -230,9 +230,9 @@ pub mod pallet { pub type Staked = StorageNMap< Key = ( Key, - Key, + Key>, ), - Value = (BalanceOf, T::BlockNumber), + Value = (BalanceOf, BlockNumberFor), QueryKind = ValueQuery, >; @@ -252,7 +252,7 @@ pub mod pallet { pub type PendingUnstake = StorageMap< _, Twox64Concat, - T::BlockNumber, + BlockNumberFor, BoundedVec<(T::AccountId, BalanceOf), ConstU32>, ValueQuery, >; @@ -262,16 +262,16 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn get_next_calculated_record)] pub type PreviousCalculatedRecord = - StorageValue; + StorageValue), QueryKind = OptionQuery>; #[pallet::hooks] impl Hooks> for Pallet { /// Block overflow is impossible due to the fact that the unstake algorithm in on_initialize /// implies the execution of a strictly limited number of relatively lightweight operations. /// A separate benchmark has been implemented to scale the weight depending on the number of pendings. - fn on_initialize(current_block_number: T::BlockNumber) -> Weight + fn on_initialize(current_block_number: BlockNumberFor) -> Weight where - ::BlockNumber: From, + BlockNumberFor: From, { if T::IsMaintenanceModeEnabled::get() { return T::DbWeight::get().reads_writes(1, 0); @@ -302,7 +302,7 @@ pub mod pallet { #[pallet::call] impl Pallet where - T::BlockNumber: From + Into, + BlockNumberFor: From + Into, <::Currency as Inspect>::Balance: Sum + From, { /// Sets an address as the the admin. @@ -369,7 +369,7 @@ pub mod pallet { // Calculation of the number of recalculation periods, // after how much the first interest calculation should be performed for the stake - let recalculate_after_interval: T::BlockNumber = + let recalculate_after_interval: BlockNumberFor = if block_number % config.recalculation_interval == 0u32.into() { 1u32.into() } else { @@ -705,7 +705,7 @@ pub mod pallet { #[pallet::weight(::WeightInfo::on_initialize(PENDING_LIMIT_PER_BLOCK*pending_blocks.len() as u32))] pub fn force_unstake( origin: OriginFor, - pending_blocks: Vec, + pending_blocks: Vec>, ) -> DispatchResult { ensure_root(origin)?; @@ -917,7 +917,7 @@ impl Pallet { /// - `staker`: staker account. pub fn total_staked_by_id_per_block( staker: impl EncodeLike, - ) -> Option)>> { + ) -> Option, BalanceOf)>> { let mut staked = Staked::::iter_prefix((staker,)) .map(|(block, (amount, _))| (block, amount)) .collect::>(); @@ -944,14 +944,14 @@ impl Pallet { /// - `staker`: staker account. pub fn cross_id_total_staked_per_block( staker: T::CrossAccountId, - ) -> Vec<(T::BlockNumber, BalanceOf)> { + ) -> Vec<(BlockNumberFor, BalanceOf)> { Self::total_staked_by_id_per_block(staker.as_sub()).unwrap_or_default() } fn recalculate_and_insert_stake( staker: &T::AccountId, - staked_block: T::BlockNumber, - next_recalc_block: T::BlockNumber, + staked_block: BlockNumberFor, + next_recalc_block: BlockNumberFor, base: BalanceOf, iters: u32, income_acc: &mut BalanceOf, @@ -979,9 +979,9 @@ impl Pallet { /// Get relay block number rounded down to multiples of config.recalculation_interval. /// We need it to reward stakers in integer parts of recalculation_interval fn get_current_recalc_block( - current_relay_block: T::BlockNumber, + current_relay_block: BlockNumberFor, config: &PalletConfiguration, - ) -> T::BlockNumber { + ) -> BlockNumberFor { (current_relay_block / config.recalculation_interval) * config.recalculation_interval } @@ -1028,7 +1028,7 @@ where /// - `staker`: staker account. 
pub fn cross_id_pending_unstake_per_block( staker: T::CrossAccountId, - ) -> Vec<(T::BlockNumber, BalanceOf)> { + ) -> Vec<(BlockNumberFor, BalanceOf)> { let mut unsorted_res = vec![]; PendingUnstake::::iter().for_each(|(block, pendings)| { pendings.into_iter().for_each(|(id, amount)| { diff --git a/pallets/app-promotion/src/types.rs b/pallets/app-promotion/src/types.rs index ac9315ed31..292efe55cd 100644 --- a/pallets/app-promotion/src/types.rs +++ b/pallets/app-promotion/src/types.rs @@ -114,9 +114,9 @@ impl ContractHandler for EvmHelpersPallet { } pub(crate) struct PalletConfiguration { /// In relay blocks. - pub recalculation_interval: T::BlockNumber, + pub recalculation_interval: BlockNumberFor, /// In parachain blocks. - pub pending_interval: T::BlockNumber, + pub pending_interval: BlockNumberFor, /// Value for `RecalculationInterval` based on 0.05% per 24h. pub interval_income: Perbill, /// Maximum allowable number of stakers calculated per call of the `app-promotion::PayoutStakers` extrinsic. diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs index 6ae72acf8e..85fe54c526 100644 --- a/pallets/collator-selection/src/benchmarking.rs +++ b/pallets/collator-selection/src/benchmarking.rs @@ -51,6 +51,10 @@ use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; use sp_std::prelude::*; +use super::*; +#[allow(unused)] +use crate::{BalanceOf, Pallet as CollatorSelection}; + const SEED: u32 = 0; // TODO: remove if this is given in substrate commit. @@ -317,7 +321,7 @@ benchmarks! { balance_unit::() * 4u32.into(), ); let author = account("author", 0, SEED); - let new_block: T::BlockNumber = 10u32.into(); + let new_block: BlockNumberFor= 10u32.into(); frame_system::Pallet::::set_block_number(new_block); assert!(T::Currency::balance(&author) == 0u32.into()); @@ -338,7 +342,7 @@ benchmarks! { register_validators::(c); register_candidates::(c); - let new_block: T::BlockNumber = 1800u32.into(); + let new_block: BlockNumberFor= 1800u32.into(); let zero_block: T::BlockNumber = 0u32.into(); let candidates = >::get(); diff --git a/pallets/collator-selection/src/lib.rs b/pallets/collator-selection/src/lib.rs index 99db1d7d1b..027b995e7e 100644 --- a/pallets/collator-selection/src/lib.rs +++ b/pallets/collator-selection/src/lib.rs @@ -131,8 +131,11 @@ pub mod pallet { pub trait Config: frame_system::Config { /// Overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// Overarching hold reason. + type RuntimeHoldReason: From; + type Currency: Mutate - + MutateHold + + MutateHold + BalancedHold; /// Origin that can dictate updating parameters of this pallet. @@ -164,14 +167,17 @@ pub mod pallet { /// The weight information of this pallet. type WeightInfo: WeightInfo; - #[pallet::constant] - type LicenceBondIdentifier: Get<>::Reason>; - type DesiredCollators: Get; type LicenseBond: Get>; - type KickThreshold: Get; + type KickThreshold: Get>; + } + + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as the license bond. 
+ LicenseBond, } #[pallet::pallet] @@ -199,14 +205,13 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn last_authored_block)] pub type LastAuthoredBlock = - StorageMap<_, Twox64Concat, T::AccountId, T::BlockNumber, ValueQuery>; + StorageMap<_, Twox64Concat, T::AccountId, BlockNumberFor, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { pub invulnerables: Vec, } - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { Self { @@ -216,12 +221,11 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - let duplicate_invulnerables = self - .invulnerables - .iter() - .collect::>(); + use sp_std::collections::btree_set::BTreeSet; + + let duplicate_invulnerables = self.invulnerables.iter().collect::>(); assert!( duplicate_invulnerables.len() == self.invulnerables.len(), "duplicate invulnerables in genesis." @@ -375,7 +379,7 @@ pub mod pallet { let deposit = T::LicenseBond::get(); - T::Currency::hold(&T::LicenceBondIdentifier::get(), &who, deposit)?; + T::Currency::hold(&HoldReason::LicenseBond.into(), &who, deposit)?; LicenseDepositOf::::insert(who.clone(), deposit); Self::deposit_event(Event::LicenseObtained { @@ -538,7 +542,7 @@ pub mod pallet { let remaining = deposit - slashed; let (imbalance, _) = - T::Currency::slash(&T::LicenceBondIdentifier::get(), who, slashed); + T::Currency::slash(&HoldReason::LicenseBond.into(), who, slashed); deposit_returned = remaining; T::Currency::resolve(&T::TreasuryAccountId::get(), imbalance) @@ -548,7 +552,7 @@ pub mod pallet { } T::Currency::release( - &T::LicenceBondIdentifier::get(), + &HoldReason::LicenseBond.into(), who, deposit_returned, Precision::Exact, @@ -608,7 +612,7 @@ pub mod pallet { /// Keep track of number of authored blocks per authority, uncles are counted as well since /// they're a valid proof of being online. impl - pallet_authorship::EventHandler for Pallet + pallet_authorship::EventHandler> for Pallet { fn note_author(author: T::AccountId) { let pot = Self::account_id(); diff --git a/pallets/collator-selection/src/mock.rs b/pallets/collator-selection/src/mock.rs index ba0c7d33fe..f273eeedde 100644 --- a/pallets/collator-selection/src/mock.rs +++ b/pallets/collator-selection/src/mock.rs @@ -51,18 +51,14 @@ type Block = frame_system::mocking::MockBlock; // Configure a mock runtime to test the pallet. 
frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - Aura: pallet_aura::{Pallet, Storage, Config}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - CollatorSelection: collator_selection::{Pallet, Call, Storage, Event}, - Authorship: pallet_authorship::{Pallet, Storage}, + pub enum Test { + System: frame_system, + Timestamp: pallet_timestamp, + Session: pallet_session, + Aura: pallet_aura, + Balances: pallet_balances, + CollatorSelection: collator_selection, + Authorship: pallet_authorship, } ); @@ -78,13 +74,11 @@ impl system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = (); @@ -115,7 +109,6 @@ impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; - type HoldIdentifier = [u8; 16]; type FreezeIdentifier = [u8; 16]; type MaxHolds = MaxHolds; type MaxFreezes = MaxFreezes; diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index fbbd29048a..e28b90e30c 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -463,7 +463,6 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig(PhantomData); - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { Self(Default::default()) @@ -471,7 +470,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { StorageVersion::new(1).put::>(); } diff --git a/pallets/configuration/src/benchmarking.rs b/pallets/configuration/src/benchmarking.rs index 8c566b313d..8399b00e79 100644 --- a/pallets/configuration/src/benchmarking.rs +++ b/pallets/configuration/src/benchmarking.rs @@ -52,7 +52,7 @@ benchmarks! { } set_app_promotion_configuration_override { - let configuration: AppPromotionConfiguration = Default::default(); + let configuration: AppPromotionConfiguration> = Default::default(); }: { assert_ok!( >::set_app_promotion_configuration_override(RawOrigin::Root.into(), configuration) @@ -82,7 +82,7 @@ benchmarks! { } set_collator_selection_kick_threshold { - let threshold: Option = Some(900u32.into()); + let threshold: Option> = Some(900u32.into()); }: { assert_ok!( >::set_collator_selection_kick_threshold(RawOrigin::Root.into(), threshold) diff --git a/pallets/configuration/src/lib.rs b/pallets/configuration/src/lib.rs index f1c3a88a78..7648caa1e8 100644 --- a/pallets/configuration/src/lib.rs +++ b/pallets/configuration/src/lib.rs @@ -80,14 +80,14 @@ mod pallet { #[pallet::constant] type AppPromotionDailyRate: Get; #[pallet::constant] - type DayRelayBlocks: Get; + type DayRelayBlocks: Get>; #[pallet::constant] type DefaultCollatorSelectionMaxCollators: Get; #[pallet::constant] type DefaultCollatorSelectionLicenseBond: Get; #[pallet::constant] - type DefaultCollatorSelectionKickThreshold: Get; + type DefaultCollatorSelectionKickThreshold: Get>; /// The weight information of this pallet. 
type WeightInfo: WeightInfo; @@ -103,7 +103,7 @@ mod pallet { bond_cost: Option, }, NewCollatorKickThreshold { - length_in_blocks: Option, + length_in_blocks: Option>, }, } @@ -134,7 +134,6 @@ mod pallet { #[pallet::genesis_config] pub struct GenesisConfig(PhantomData); - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { Self(Default::default()) @@ -142,7 +141,7 @@ mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { update_base_fee::(); } @@ -166,7 +165,7 @@ mod pallet { #[pallet::storage] pub type AppPromomotionConfigurationOverride = - StorageValue, QueryKind = ValueQuery>; + StorageValue>, QueryKind = ValueQuery>; #[pallet::storage] pub type CollatorSelectionDesiredCollatorsOverride = StorageValue< @@ -184,7 +183,7 @@ mod pallet { #[pallet::storage] pub type CollatorSelectionKickThresholdOverride = StorageValue< - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = ValueQuery, OnEmpty = T::DefaultCollatorSelectionKickThreshold, >; @@ -228,7 +227,7 @@ mod pallet { #[pallet::weight(T::WeightInfo::set_app_promotion_configuration_override())] pub fn set_app_promotion_configuration_override( origin: OriginFor, - mut configuration: AppPromotionConfiguration, + mut configuration: AppPromotionConfiguration>, ) -> DispatchResult { ensure_root(origin)?; if configuration.interval_income.is_some() { @@ -287,7 +286,7 @@ mod pallet { #[pallet::weight(T::WeightInfo::set_collator_selection_kick_threshold())] pub fn set_collator_selection_kick_threshold( origin: OriginFor, - threshold: Option, + threshold: Option>, ) -> DispatchResult { ensure_root(origin)?; if let Some(threshold) = threshold { diff --git a/pallets/evm-contract-helpers/src/eth.rs b/pallets/evm-contract-helpers/src/eth.rs index 542614f773..e58a7de244 100644 --- a/pallets/evm-contract-helpers/src/eth.rs +++ b/pallets/evm-contract-helpers/src/eth.rs @@ -422,7 +422,7 @@ impl SponsorshipHandler { return None; } - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; if let Some(last_tx_block) = >::get(contract_address, who.as_eth()) { let limit = >::get(contract_address); diff --git a/pallets/evm-contract-helpers/src/lib.rs b/pallets/evm-contract-helpers/src/lib.rs index 68df173789..768af126ed 100644 --- a/pallets/evm-contract-helpers/src/lib.rs +++ b/pallets/evm-contract-helpers/src/lib.rs @@ -53,7 +53,7 @@ pub mod pallet { /// In case of enabled sponsoring, but no sponsoring rate limit set, /// this value will be used implicitly - type DefaultSponsoringRateLimit: Get; + type DefaultSponsoringRateLimit: Get>; } #[pallet::error] @@ -115,7 +115,7 @@ pub mod pallet { pub(super) type SponsoringRateLimit = StorageMap< Hasher = Twox128, Key = H160, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = ValueQuery, OnEmpty = T::DefaultSponsoringRateLimit, >; @@ -139,7 +139,7 @@ pub mod pallet { Key1 = H160, Hasher2 = Twox128, Key2 = H160, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; @@ -393,7 +393,7 @@ pub mod pallet { } /// Set duration between two sponsored contract calls - pub fn set_sponsoring_rate_limit(contract: H160, rate_limit: T::BlockNumber) { + pub fn set_sponsoring_rate_limit(contract: H160, rate_limit: BlockNumberFor) { >::insert(contract, rate_limit); } diff --git a/pallets/evm-migration/src/benchmarking.rs b/pallets/evm-migration/src/benchmarking.rs index 46825ad768..b27c3bb0ef 100644 --- 
a/pallets/evm-migration/src/benchmarking.rs +++ b/pallets/evm-migration/src/benchmarking.rs @@ -23,7 +23,7 @@ use sp_core::{H160, H256}; use sp_std::{vec::Vec, vec}; benchmarks! { - where_clause { where ::RuntimeEvent: codec::Encode } + where_clause { where ::RuntimeEvent: parity_scale_codec::Encode } begin { }: _(RawOrigin::Root, H160::default()) @@ -59,7 +59,7 @@ benchmarks! { insert_events { let b in 0..200; - use codec::Encode; + use parity_scale_codec::Encode; let logs = (0..b).map(|_| ::RuntimeEvent::from(crate::Event::::TestEvent).encode()).collect::>(); }: _(RawOrigin::Root, logs) } diff --git a/pallets/foreign-assets/src/impl_fungibles.rs b/pallets/foreign-assets/src/impl_fungibles.rs index 9321dc8eae..1b34a538a7 100644 --- a/pallets/foreign-assets/src/impl_fungibles.rs +++ b/pallets/foreign-assets/src/impl_fungibles.rs @@ -30,30 +30,30 @@ use sp_runtime::traits::{CheckedAdd, CheckedSub}; impl fungibles::Inspect<::AccountId> for Pallet where - T: orml_tokens::Config, + T: orml_tokens::Config, BalanceOf: From<::Balance>, BalanceOf: From<::Balance>, ::Balance: From>, ::Balance: From>, { - type AssetId = AssetIds; + type AssetId = AssetId; type Balance = BalanceOf; fn total_issuance(asset: Self::AssetId) -> Self::Balance { log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible total_issuance"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Inspect>::total_issuance() .into() } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Inspect>::total_issuance( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), ) .into() } - AssetIds::ForeignAssetId(fid) => { + AssetId::ForeignAssetId(fid) => { let target_collection_id = match >::get(fid) { Some(v) => v, None => return Zero::zero(), @@ -71,38 +71,36 @@ where fn minimum_balance(asset: Self::AssetId) -> Self::Balance { log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible minimum_balance"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Inspect>::minimum_balance() .into() } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Inspect>::minimum_balance( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), ) .into() } - AssetIds::ForeignAssetId(fid) => { - AssetMetadatas::::get(AssetIds::ForeignAssetId(fid)) - .map(|x| x.minimal_balance) - .unwrap_or_else(Zero::zero) - } + AssetId::ForeignAssetId(fid) => AssetMetadatas::::get(AssetId::ForeignAssetId(fid)) + .map(|x| x.minimal_balance) + .unwrap_or_else(Zero::zero), } } fn balance(asset: Self::AssetId, who: &::AccountId) -> Self::Balance { log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible balance"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Inspect>::balance(who).into() } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Inspect>::balance( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, ) .into() } - AssetIds::ForeignAssetId(fid) => { + AssetId::ForeignAssetId(fid) => { let target_collection_id = match >::get(fid) { Some(v) => v, None => 
return Zero::zero(), @@ -133,7 +131,7 @@ where log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible reducible_balance"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Inspect>::reducible_balance( who, preservation, @@ -141,9 +139,9 @@ where ) .into() } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Inspect>::reducible_balance( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, preservation, fortitude, @@ -163,16 +161,16 @@ where log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible can_deposit"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Inspect>::can_deposit( who, amount.into(), provenance, ) } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Inspect>::can_deposit( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, amount.into(), provenance, @@ -219,7 +217,7 @@ where }; match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { let this_amount: ::Balance = match value.try_into() { Ok(val) => val, Err(_) => { @@ -240,7 +238,7 @@ where _ => WithdrawConsequence::BalanceLow, } } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { let parent_amount: ::Balance = match value.try_into() { Ok(val) => val, Err(_) => { @@ -248,7 +246,7 @@ where } }; match as fungibles::Inspect>::can_withdraw( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, parent_amount, ) { @@ -269,17 +267,17 @@ where } } - fn asset_exists(asset: AssetIds) -> bool { + fn asset_exists(asset: AssetId) -> bool { match asset { - AssetIds::NativeAssetId(_) => true, - AssetIds::ForeignAssetId(fid) => >::contains_key(fid), + AssetId::NativeAssetId(_) => true, + AssetId::ForeignAssetId(fid) => >::contains_key(fid), } } } impl fungibles::Mutate<::AccountId> for Pallet where - T: orml_tokens::Config, + T: orml_tokens::Config, BalanceOf: From<::Balance>, BalanceOf: From<::Balance>, ::Balance: From>, @@ -295,22 +293,22 @@ where log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible mint_into {:?}", asset); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Mutate>::mint_into( who, amount.into(), ) .map(Into::into) } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Mutate>::mint_into( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, amount.into(), ) .map(Into::into) } - AssetIds::ForeignAssetId(fid) => { + AssetId::ForeignAssetId(fid) => { let target_collection_id = match >::get(fid) { Some(v) => v, None => { @@ -349,7 +347,7 @@ where log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible burn_from"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { as fungible::Mutate>::burn_from( who, amount.into(), @@ -358,9 +356,9 @@ where ) .map(Into::into) } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + 
AssetId::NativeAssetId(NativeCurrency::Parent) => { as fungibles::Mutate>::burn_from( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), who, amount.into(), precision, @@ -368,7 +366,7 @@ where ) .map(Into::into) } - AssetIds::ForeignAssetId(fid) => { + AssetId::ForeignAssetId(fid) => { let target_collection_id = match >::get(fid) { Some(v) => v, None => { @@ -401,7 +399,7 @@ where log::trace!(target: "fassets::impl_foreign_assets", "impl_fungible transfer"); match asset { - AssetIds::NativeAssetId(NativeCurrency::Here) => { + AssetId::NativeAssetId(NativeCurrency::Here) => { match as fungible::Mutate>::transfer( source, dest, @@ -414,9 +412,9 @@ where )), } } - AssetIds::NativeAssetId(NativeCurrency::Parent) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => { match as fungibles::Mutate>::transfer( - AssetIds::NativeAssetId(NativeCurrency::Parent), + AssetId::NativeAssetId(NativeCurrency::Parent), source, dest, amount.into(), @@ -426,7 +424,7 @@ where Err(e) => Err(e), } } - AssetIds::ForeignAssetId(fid) => { + AssetId::ForeignAssetId(fid) => { let target_collection_id = match >::get(fid) { Some(v) => v, None => { @@ -479,7 +477,7 @@ macro_rules! ensure_balanced { impl fungibles::Unbalanced<::AccountId> for Pallet where - T: orml_tokens::Config, + T: orml_tokens::Config, BalanceOf: From<::Balance>, BalanceOf: From<::Balance>, ::Balance: From>, diff --git a/pallets/foreign-assets/src/lib.rs b/pallets/foreign-assets/src/lib.rs index 6be344d960..88fc345d31 100644 --- a/pallets/foreign-assets/src/lib.rs +++ b/pallets/foreign-assets/src/lib.rs @@ -79,8 +79,9 @@ use serde::{Deserialize, Serialize}; Encode, Decode, TypeInfo, + Serialize, + Deserialize, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum NativeCurrency { Here = 0, Parent = 1, @@ -98,9 +99,10 @@ pub enum NativeCurrency { Encode, Decode, TypeInfo, + Serialize, + Deserialize, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub enum AssetIds { +pub enum AssetId { ForeignAssetId(ForeignAssetId), NativeAssetId(NativeCurrency), } @@ -109,17 +111,17 @@ pub trait TryAsForeign { fn try_as_foreign(asset: T) -> Option; } -impl TryAsForeign for AssetIds { - fn try_as_foreign(asset: AssetIds) -> Option { +impl TryAsForeign for AssetId { + fn try_as_foreign(asset: AssetId) -> Option { match asset { - AssetIds::ForeignAssetId(id) => Some(id), + Self::ForeignAssetId(id) => Some(id), _ => None, } } } pub type ForeignAssetId = u32; -pub type CurrencyId = AssetIds; +pub type CurrencyId = AssetId; mod impl_fungibles; pub mod weights; @@ -151,7 +153,7 @@ impl AssetIdMapping Option>> { log::trace!(target: "fassets::asset_metadatas", "call"); - Pallet::::asset_metadatas(AssetIds::ForeignAssetId(foreign_asset_id)) + Pallet::::asset_metadatas(AssetId::ForeignAssetId(foreign_asset_id)) } fn get_multi_location(foreign_asset_id: ForeignAssetId) -> Option { @@ -161,7 +163,7 @@ impl AssetIdMapping Option { log::trace!(target: "fassets::get_currency_id", "call"); - Pallet::::location_to_currency_ids(multi_location).map(AssetIds::ForeignAssetId) + Pallet::::location_to_currency_ids(multi_location).map(AssetId::ForeignAssetId) } } @@ -231,12 +233,12 @@ pub mod module { }, /// The asset registered. AssetRegistered { - asset_id: AssetIds, + asset_id: AssetId, metadata: AssetMetadata>, }, /// The asset updated. 
AssetUpdated { - asset_id: AssetIds, + asset_id: AssetId, metadata: AssetMetadata>, }, } @@ -253,7 +255,7 @@ pub mod module { #[pallet::storage] #[pallet::getter(fn foreign_asset_locations)] pub type ForeignAssetLocations = - StorageMap<_, Twox64Concat, ForeignAssetId, xcm::v3::MultiLocation, OptionQuery>; + StorageMap<_, Twox64Concat, ForeignAssetId, staging_xcm::v3::MultiLocation, OptionQuery>; /// The storages for CurrencyIds. /// @@ -261,7 +263,7 @@ pub mod module { #[pallet::storage] #[pallet::getter(fn location_to_currency_ids)] pub type LocationToCurrencyIds = - StorageMap<_, Twox64Concat, xcm::v3::MultiLocation, ForeignAssetId, OptionQuery>; + StorageMap<_, Twox64Concat, staging_xcm::v3::MultiLocation, ForeignAssetId, OptionQuery>; /// The storages for AssetMetadatas. /// @@ -269,7 +271,7 @@ pub mod module { #[pallet::storage] #[pallet::getter(fn asset_metadatas)] pub type AssetMetadatas = - StorageMap<_, Twox64Concat, AssetIds, AssetMetadata>, OptionQuery>; + StorageMap<_, Twox64Concat, AssetId, AssetMetadata>, OptionQuery>; /// The storages for assets to fungible collection binding /// @@ -381,7 +383,7 @@ impl Pallet { *maybe_location = Some(*location); AssetMetadatas::::try_mutate( - AssetIds::ForeignAssetId(foreign_asset_id), + AssetId::ForeignAssetId(foreign_asset_id), |maybe_asset_metadatas| -> DispatchResult { ensure!(maybe_asset_metadatas.is_none(), Error::::AssetIdExisted); *maybe_asset_metadatas = Some(metadata.clone()); @@ -413,7 +415,7 @@ impl Pallet { .ok_or(Error::::AssetIdNotExists)?; AssetMetadatas::::try_mutate( - AssetIds::ForeignAssetId(foreign_asset_id), + AssetId::ForeignAssetId(foreign_asset_id), |maybe_asset_metadatas| -> DispatchResult { ensure!( maybe_asset_metadatas.is_some(), @@ -450,7 +452,7 @@ pub use frame_support::{ traits::{ fungibles::Balanced, tokens::currency::Currency as CurrencyT, OnUnbalanced as OnUnbalancedT, }, - weights::{WeightToFeePolynomial, WeightToFee}, + weights::{WeightToFee, WeightToFeePolynomial}, }; pub struct FreeForAll< @@ -477,7 +479,12 @@ impl< Self(Weight::default(), Zero::zero(), PhantomData) } - fn buy_weight(&mut self, weight: Weight, payment: Assets) -> Result { + fn buy_weight( + &mut self, + weight: Weight, + payment: Assets, + _xcm: &XcmContext, + ) -> Result { log::trace!(target: "fassets::weight", "buy_weight weight: {:?}, payment: {:?}", weight, payment); Ok(payment) } diff --git a/pallets/identity/src/tests.rs b/pallets/identity/src/tests.rs index 019acaf025..aae4acd15f 100644 --- a/pallets/identity/src/tests.rs +++ b/pallets/identity/src/tests.rs @@ -54,14 +54,10 @@ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Identity: pallet_identity::{Pallet, Call, Storage, Event}, + pub enum Test { + System: frame_system, + Balances: pallet_balances, + Identity: pallet_identity, } ); @@ -71,17 +67,16 @@ parameter_types! 
{ } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; type BlockWeights = (); type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); @@ -106,7 +101,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type WeightInfo = (); - type HoldIdentifier = (); + type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); @@ -139,8 +134,8 @@ impl pallet_identity::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = >::default() + .build_storage() .unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], diff --git a/pallets/identity/src/types.rs b/pallets/identity/src/types.rs index 4fbcef5596..83e139df5b 100644 --- a/pallets/identity/src/types.rs +++ b/pallets/identity/src/types.rs @@ -77,7 +77,9 @@ impl Data { } impl Decode for Data { - fn decode(input: &mut I) -> sp_std::result::Result { + fn decode( + input: &mut I, + ) -> sp_std::result::Result { let b = input.read_byte()?; Ok(match b { 0 => Data::None, @@ -92,7 +94,7 @@ impl Decode for Data { 35 => Data::Sha256(<[u8; 32]>::decode(input)?), 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), - _ => return Err(codec::Error::from("invalid leading byte")), + _ => return Err(parity_scale_codec::Error::from("invalid leading byte")), }) } } @@ -114,7 +116,7 @@ impl Encode for Data { } } } -impl codec::EncodeLike for Data {} +impl parity_scale_codec::EncodeLike for Data {} /// Add a Raw variant with the given index and a fixed sized byte array macro_rules! 
data_raw_variants { @@ -284,7 +286,9 @@ impl Encode for IdentityFields { } } impl Decode for IdentityFields { - fn decode(input: &mut I) -> sp_std::result::Result { + fn decode( + input: &mut I, + ) -> sp_std::result::Result { let field = u64::decode(input)?; Ok(Self( >::from_bits(field).map_err(|_| "invalid value")?, @@ -445,7 +449,9 @@ impl< MaxAdditionalFields: Get, > Decode for Registration { - fn decode(input: &mut I) -> sp_std::result::Result { + fn decode( + input: &mut I, + ) -> sp_std::result::Result { let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; Ok(Self { judgements, diff --git a/pallets/inflation/src/lib.rs b/pallets/inflation/src/lib.rs index 47f1eb2e60..c690b2d4ae 100644 --- a/pallets/inflation/src/lib.rs +++ b/pallets/inflation/src/lib.rs @@ -73,11 +73,11 @@ pub mod pallet { type TreasuryAccountId: Get; // The block number provider - type BlockNumberProvider: BlockNumberProvider; + type BlockNumberProvider: BlockNumberProvider>; /// Number of blocks that pass between treasury balance updates due to inflation #[pallet::constant] - type InflationBlockInterval: Get; + type InflationBlockInterval: Get>; } #[pallet::pallet] @@ -95,22 +95,23 @@ pub mod pallet { /// Next target (relay) block when inflation will be applied #[pallet::storage] pub type NextInflationBlock = - StorageValue; + StorageValue, QueryKind = ValueQuery>; /// Next target (relay) block when inflation is recalculated #[pallet::storage] pub type NextRecalculationBlock = - StorageValue; + StorageValue, QueryKind = ValueQuery>; /// Relay block when inflation has started #[pallet::storage] - pub type StartBlock = StorageValue; + pub type StartBlock = + StorageValue, QueryKind = ValueQuery>; #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(_: T::BlockNumber) -> Weight + fn on_initialize(_: BlockNumberFor) -> Weight where - ::BlockNumber: From, + BlockNumberFor: From, { let mut consumed_weight = Weight::zero(); let mut add_weight = |reads, writes, weight| { @@ -120,7 +121,7 @@ pub mod pallet { let block_interval: u32 = T::InflationBlockInterval::get().try_into().unwrap_or(0); let current_relay_block = T::BlockNumberProvider::current_block_number(); - let next_inflation: T::BlockNumber = >::get(); + let next_inflation: BlockNumberFor = >::get(); add_weight(1, 0, Weight::from_parts(5_000_000, 0)); // Apply inflation every InflationBlockInterval blocks @@ -129,7 +130,7 @@ pub mod pallet { // Recalculate inflation on the first block of the year (or if it is not initialized yet) // Do the "current_relay_block >= next_recalculation" check in the "current_relay_block >= next_inflation" // block because it saves InflationBlockInterval DB reads for NextRecalculationBlock. 
- let next_recalculation: T::BlockNumber = >::get(); + let next_recalculation: BlockNumberFor = >::get(); add_weight(1, 0, Weight::zero()); if current_relay_block >= next_recalculation { Self::recalculate_inflation(next_recalculation); @@ -169,10 +170,10 @@ pub mod pallet { #[pallet::weight(Weight::from_parts(0, 0))] pub fn start_inflation( origin: OriginFor, - inflation_start_relay_block: T::BlockNumber, + inflation_start_relay_block: BlockNumberFor, ) -> DispatchResult where - ::BlockNumber: From, + BlockNumberFor: From, { ensure_root(origin)?; @@ -200,9 +201,9 @@ pub mod pallet { } impl Pallet { - pub fn recalculate_inflation(recalculation_block: T::BlockNumber) { + pub fn recalculate_inflation(recalculation_block: BlockNumberFor) { let current_year: u32 = ((recalculation_block - >::get()) - / T::BlockNumber::from(YEAR)) + / BlockNumberFor::::from(YEAR)) .try_into() .unwrap_or(0); let block_interval: u32 = T::InflationBlockInterval::get().try_into().unwrap_or(0); diff --git a/pallets/inflation/src/tests.rs b/pallets/inflation/src/tests.rs index 72b247ff08..0e0a91767d 100644 --- a/pallets/inflation/src/tests.rs +++ b/pallets/inflation/src/tests.rs @@ -57,21 +57,16 @@ impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; type MaxReserves = (); type ReserveIdentifier = (); - type HoldIdentifier = (); type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); } frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - Balances: pallet_balances::{Pallet, Call, Storage}, - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Inflation: pallet_inflation::{Pallet, Call, Storage}, + pub enum Test { + Balances: pallet_balances, + System: frame_system, + Inflation: pallet_inflation, } ); @@ -89,13 +84,11 @@ impl frame_system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; type RuntimeEvent = (); type BlockHashCount = BlockHashCount; type Version = (); @@ -112,11 +105,11 @@ impl frame_system::Config for Test { parameter_types! 
{ pub TreasuryAccountId: u64 = 1234; pub const InflationBlockInterval: u32 = 100; // every time per how many blocks inflation is applied - pub static MockBlockNumberProvider: u64 = 0; + pub static MockBlockNumberProvider: u32 = 0; } impl BlockNumberProvider for MockBlockNumberProvider { - type BlockNumber = u64; + type BlockNumber = u32; fn current_block_number() -> Self::BlockNumber { Self::get() @@ -131,8 +124,8 @@ impl pallet_inflation::Config for Test { } pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() + >::default() + .build_storage() .unwrap() .into() } diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index 02ad2165f0..bfbe4c6ad9 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -90,37 +90,37 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::ops::Deref; + use erc::ERC721Events; use evm_coder::ToLog; use frame_support::{ - BoundedVec, ensure, fail, transactional, + dispatch::{Pays, PostDispatchInfo}, + ensure, fail, + pallet_prelude::*, storage::with_transaction, - pallet_prelude::DispatchResultWithPostInfo, - pallet_prelude::Weight, - dispatch::{PostDispatchInfo, Pays}, -}; -use up_data_structs::{ - AccessMode, CollectionId, CustomDataLimit, TokenId, CreateCollectionData, CreateNftExData, - mapping::TokenAddressMapping, budget::Budget, Property, PropertyKey, PropertyValue, - PropertyKeyPermission, PropertyScope, TokenChild, AuxPropertyValue, PropertiesPermissionMap, - TokenProperties as TokenPropertiesT, + transactional, BoundedVec, }; -use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; +pub use pallet::*; use pallet_common::{ Error as CommonError, Pallet as PalletCommon, Event as CommonEvent, CollectionHandle, eth::collection_id_to_address, SelfWeightOf as PalletCommonWeightOf, weights::WeightInfo as CommonWeightInfo, helpers::add_weight_to_post_info, }; -use pallet_structure::{Pallet as PalletStructure, Error as StructureError}; +use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; +use pallet_structure::{Error as StructureError, Pallet as PalletStructure}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; use sp_core::{Get, H160}; use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, TransactionOutcome}; -use sp_std::{vec::Vec, vec, collections::btree_map::BTreeMap}; -use core::ops::Deref; -use codec::{Encode, Decode, MaxEncodedLen}; -use scale_info::TypeInfo; - -pub use pallet::*; +use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; +use up_data_structs::{ + AccessMode, CollectionId, CustomDataLimit, TokenId, CreateCollectionData, CreateNftExData, + mapping::TokenAddressMapping, budget::Budget, Property, PropertyKey, PropertyValue, + PropertyKeyPermission, PropertyScope, TokenChild, AuxPropertyValue, PropertiesPermissionMap, + TokenProperties as TokenPropertiesT, +}; use weights::WeightInfo; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; @@ -147,12 +147,12 @@ pub struct ItemData { #[frame_support::pallet] pub mod pallet { - use super::*; use frame_support::{ - Blake2_128Concat, Twox64Concat, pallet_prelude::*, storage::Key, traits::StorageVersion, + pallet_prelude::*, storage::Key, traits::StorageVersion, Blake2_128Concat, Twox64Concat, }; use up_data_structs::{CollectionId, TokenId}; - use super::weights::WeightInfo; + + use super::{weights::WeightInfo, *}; #[pallet::error] pub enum 
Error { @@ -285,7 +285,6 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig(PhantomData); - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { Self(Default::default()) @@ -293,7 +292,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { StorageVersion::new(1).put::>(); } diff --git a/pallets/unique/src/lib.rs b/pallets/unique/src/lib.rs index 0b37621bf8..c61940d0f8 100644 --- a/pallets/unique/src/lib.rs +++ b/pallets/unique/src/lib.rs @@ -174,7 +174,7 @@ pub mod pallet { pub type CreateItemBasket = StorageMap< Hasher = Blake2_128Concat, Key = (CollectionId, T::AccountId), - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; /// Collection id (controlled?2), token id (controlled?2) @@ -185,7 +185,7 @@ pub mod pallet { Key1 = CollectionId, Hasher2 = Blake2_128Concat, Key2 = TokenId, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; /// Collection id (controlled?2), owning user (real) @@ -196,7 +196,7 @@ pub mod pallet { Key1 = CollectionId, Hasher2 = Twox64Concat, Key2 = T::AccountId, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; /// Collection id (controlled?2), token id (controlled?2) @@ -208,7 +208,7 @@ pub mod pallet { Key, Key, ), - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; //#endregion @@ -221,7 +221,7 @@ pub mod pallet { Key1 = CollectionId, Hasher2 = Blake2_128Concat, Key2 = TokenId, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; @@ -233,7 +233,7 @@ pub mod pallet { Key1 = CollectionId, Hasher2 = Blake2_128Concat, Key2 = TokenId, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; /// Last sponsoring of fungible tokens approval in a collection @@ -244,7 +244,7 @@ pub mod pallet { Key1 = CollectionId, Hasher2 = Twox64Concat, Key2 = T::AccountId, - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; /// Last sponsoring of RFT approval in a collection @@ -256,7 +256,7 @@ pub mod pallet { Key, Key, ), - Value = T::BlockNumber, + Value = BlockNumberFor, QueryKind = OptionQuery, >; diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index 390184190d..0ee8271b9b 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -28,14 +28,14 @@ pub const MILLISECS_PER_RELAY_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; // These time units are defined in number of blocks. -pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); -pub const HOURS: BlockNumber = MINUTES * 60; -pub const DAYS: BlockNumber = HOURS * 24; +pub const MINUTES: u32 = 60_000 / (MILLISECS_PER_BLOCK as u32); +pub const HOURS: u32 = MINUTES * 60; +pub const DAYS: u32 = HOURS * 24; // These time units are defined in number of relay blocks. 
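// For readability, the genesis-build change from the nonfungible hunk above with its
// stripped generics restored: the generic `GenesisBuild<T>` trait is replaced by the
// non-generic `BuildGenesisConfig`, and the `#[cfg(feature = "std")]` gate on the
// `Default` impl is dropped alongside it. This is a sketch of the resulting code inside
// the pallet module, not an addition to the patch.
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
    fn build(&self) {
        // Same body as before the upgrade: record the storage version at genesis.
        StorageVersion::new(1).put::<Pallet<T>>();
    }
}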
-pub const RELAY_MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_RELAY_BLOCK as BlockNumber); -pub const RELAY_HOURS: BlockNumber = RELAY_MINUTES * 60; -pub const RELAY_DAYS: BlockNumber = RELAY_HOURS * 24; +pub const RELAY_MINUTES: u32 = 60_000 / (MILLISECS_PER_RELAY_BLOCK as u32); +pub const RELAY_HOURS: u32 = RELAY_MINUTES * 60; +pub const RELAY_DAYS: u32 = RELAY_HOURS * 24; pub const MICROUNIQUE: Balance = 1_000_000_000_000; pub const MILLIUNIQUE: Balance = 1_000 * MICROUNIQUE; diff --git a/primitives/common/src/types.rs b/primitives/common/src/types.rs index 05197eaf3c..8cb4b266ce 100644 --- a/primitives/common/src/types.rs +++ b/primitives/common/src/types.rs @@ -37,10 +37,8 @@ pub mod opaque { Unknown(sp_std::vec::Vec), } - /// Opaque block header type. pub type Header = generic::Header; - /// Opaque block type. pub type Block = generic::Block; pub trait RuntimeInstance { @@ -71,7 +69,7 @@ pub type AccountIndex = u32; pub type Balance = u128; /// Index of a transaction in the chain. -pub type Index = u32; +pub type Nonce = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; diff --git a/primitives/data-structs/src/bondrewd_codec.rs b/primitives/data-structs/src/bondrewd_codec.rs index 94bd4655e2..991049220b 100644 --- a/primitives/data-structs/src/bondrewd_codec.rs +++ b/primitives/data-structs/src/bondrewd_codec.rs @@ -5,12 +5,14 @@ macro_rules! bondrewd_codec { ($T:ty) => { impl Encode for $T { - fn encode_to(&self, dest: &mut O) { + fn encode_to(&self, dest: &mut O) { dest.write(&self.into_bytes()) } } - impl codec::Decode for $T { - fn decode(from: &mut I) -> Result { + impl parity_scale_codec::Decode for $T { + fn decode( + from: &mut I, + ) -> Result { let mut bytes = [0; Self::BYTE_SIZE]; from.read(&mut bytes)?; Ok(Self::from_bytes(bytes)) diff --git a/primitives/data-structs/src/bounded.rs b/primitives/data-structs/src/bounded.rs index 13e2ceee7d..5e8d559fe0 100644 --- a/primitives/data-structs/src/bounded.rs +++ b/primitives/data-structs/src/bounded.rs @@ -26,13 +26,13 @@ use frame_support::{ }; /// [`serde`] implementations for [`BoundedVec`]. -#[cfg(feature = "serde1")] pub mod vec_serde { use core::convert::TryFrom; - use frame_support::{BoundedVec, traits::Get}; + + use frame_support::{traits::Get, BoundedVec}; use serde::{ - ser::{self, Serialize}, de::{self, Deserialize, Error}, + ser::{self, Serialize}, }; use sp_std::vec::Vec; @@ -66,17 +66,17 @@ where (v as &Vec).fmt(f) } -#[cfg(feature = "serde1")] #[allow(dead_code)] /// [`serde`] implementations for [`BoundedBTreeMap`]. pub mod map_serde { use core::convert::TryFrom; - use sp_std::collections::btree_map::BTreeMap; - use frame_support::{traits::Get, storage::bounded_btree_map::BoundedBTreeMap}; + + use frame_support::{storage::bounded_btree_map::BoundedBTreeMap, traits::Get}; use serde::{ - ser::{self, Serialize}, de::{self, Deserialize, Error}, + ser::{self, Serialize}, }; + use sp_std::collections::btree_map::BTreeMap; pub fn serialize( value: &BoundedBTreeMap, serializer: D, @@ -117,17 +117,17 @@ where (v as &BTreeMap).fmt(f) } -#[cfg(feature = "serde1")] #[allow(dead_code)] /// [`serde`] implementations for [`BoundedBTreeSet`]. 
pub mod set_serde { use core::convert::TryFrom; - use sp_std::collections::btree_set::BTreeSet; - use frame_support::{traits::Get, storage::bounded_btree_set::BoundedBTreeSet}; + + use frame_support::{storage::bounded_btree_set::BoundedBTreeSet, traits::Get}; use serde::{ - ser::{self, Serialize}, de::{self, Deserialize, Error}, + ser::{self, Serialize}, }; + use sp_std::collections::btree_set::BTreeSet; pub fn serialize( value: &BoundedBTreeSet, serializer: D, diff --git a/primitives/data-structs/src/lib.rs b/primitives/data-structs/src/lib.rs index ab2fad1ebb..ec9073a321 100644 --- a/primitives/data-structs/src/lib.rs +++ b/primitives/data-structs/src/lib.rs @@ -153,8 +153,9 @@ pub type CustomDataLimit = ConstU32; Default, TypeInfo, MaxEncodedLen, + Serialize, + Deserialize, )] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] pub struct CollectionId(pub u32); impl EncodeLike for CollectionId {} impl EncodeLike for u32 {} @@ -187,8 +188,9 @@ impl Deref for CollectionId { Default, TypeInfo, MaxEncodedLen, + Serialize, + Deserialize, )] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] pub struct TokenId(pub u32); impl EncodeLike for TokenId {} impl EncodeLike for u32 {} @@ -221,8 +223,7 @@ impl TryFrom for TokenId { /// Token data. #[struct_versioning::versioned(version = 2, upper)] -#[derive(Encode, Decode, Clone, PartialEq, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive(Encode, Decode, Clone, PartialEq, TypeInfo, Serialize, Deserialize)] pub struct TokenData { /// Properties of token. pub properties: Vec, @@ -251,8 +252,9 @@ pub type DecimalPoints = u8; /// Collection can represent various types of tokens. /// Each collection can contain only one type of tokens at a time. /// This type helps to understand which tokens the collection contains. -#[derive(Encode, Decode, Eq, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Eq, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, +)] pub enum CollectionMode { /// Non fungible tokens. NFT, @@ -279,8 +281,19 @@ pub trait SponsoringResolve { } /// Access mode for some token operations. -#[derive(Encode, Decode, Eq, Debug, Clone, Copy, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, + Decode, + Eq, + Debug, + Clone, + Copy, + PartialEq, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] pub enum AccessMode { /// Access grant for owner and admins. Used as default. Normal, @@ -294,8 +307,9 @@ impl Default for AccessMode { } // TODO: remove in future. -#[derive(Encode, Decode, Eq, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Eq, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, +)] pub enum SchemaVersion { ImageURL, Unique, @@ -307,16 +321,16 @@ impl Default for SchemaVersion { } // TODO: unused type -#[derive(Encode, Decode, Default, Debug, Clone, PartialEq, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive(Encode, Decode, Default, Debug, Clone, PartialEq, TypeInfo, Serialize, Deserialize)] pub struct Ownership { pub owner: AccountId, pub fraction: u128, } /// The state of collection sponsorship. 
-#[derive(Encode, Decode, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, +)] pub enum SponsorshipState { /// The fees are applied to the transaction sender. Disabled, @@ -444,8 +458,7 @@ pub struct Collection { pub meta_update_permission: MetaUpdatePermission, } -#[derive(Debug, Encode, Decode, Clone, PartialEq, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive(Debug, Encode, Decode, Clone, PartialEq, TypeInfo, Serialize, Deserialize)] pub struct RpcCollectionFlags { /// Is collection is foreign. pub foreign: bool, @@ -455,8 +468,7 @@ pub struct RpcCollectionFlags { /// Collection parameters, used in RPC calls (see [`Collection`] for the storage version). #[struct_versioning::versioned(version = 2, upper)] -#[derive(Debug, Encode, Decode, Clone, PartialEq, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive(Debug, Encode, Decode, Clone, PartialEq, TypeInfo, Serialize, Deserialize)] pub struct RpcCollection { /// Collection owner account. pub owner: AccountId, @@ -538,8 +550,10 @@ impl From> for RpcCollection pub struct RawEncoded(Vec); -impl codec::Decode for RawEncoded { - fn decode(input: &mut I) -> Result { +impl parity_scale_codec::Decode for RawEncoded { + fn decode( + input: &mut I, + ) -> Result { let mut out = Vec::new(); while let Ok(v) = input.read_byte() { out.push(v); @@ -612,8 +626,18 @@ pub type CollectionPropertiesVec = BoundedVec>; /// Wraper for collections set allowing nest. -#[derive(Encode, Decode, Clone, PartialEq, TypeInfo, MaxEncodedLen, Derivative)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Clone, PartialEq, TypeInfo, MaxEncodedLen, Derivative, Serialize, Deserialize, +)] #[derivative(Debug)] pub struct OwnerRestrictedSet( - #[cfg_attr(feature = "serde1", serde(with = "bounded::set_serde"))] + #[serde(with = "bounded::set_serde")] #[derivative(Debug(format_with = "bounded::set_debug"))] pub OwnerRestrictedSetInner, ); @@ -862,8 +897,9 @@ impl TryFrom> for OwnerRestrictedSet { } /// Part of collection permissions, if set, defines who is able to nest tokens into other tokens. -#[derive(Encode, Decode, Clone, PartialEq, TypeInfo, MaxEncodedLen, Derivative)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Clone, PartialEq, TypeInfo, MaxEncodedLen, Derivative, Serialize, Deserialize, +)] #[derivative(Debug)] pub struct NestingPermissions { /// Owner of token can nest tokens under it. @@ -881,8 +917,9 @@ pub struct NestingPermissions { /// Enum denominating how often can sponsoring occur if it is enabled. /// /// Used for [`collection limits`](CollectionLimits). -#[derive(Encode, Decode, Debug, Clone, Copy, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Debug, Clone, Copy, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, +)] pub enum SponsoringRateLimit { /// Sponsoring is disabled, and the collection sponsor will not pay for transactions SponsoringDisabled, @@ -891,42 +928,73 @@ pub enum SponsoringRateLimit { } /// Data used to describe an NFT at creation. 
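// The `RawEncoded` decode impl above changes only in that the `codec` crate alias is
// replaced with the full `parity_scale_codec` path and the long signature is re-wrapped.
// A self-contained sketch of the same shape for a toy newtype (the type name below is
// illustrative and not part of this patch):
use parity_scale_codec::{Decode, Encode, Error, Input, Output};

pub struct RawBytes(pub Vec<u8>);

impl Encode for RawBytes {
    fn encode_to<O: Output + ?Sized>(&self, dest: &mut O) {
        // Write the bytes as-is, without a length prefix.
        dest.write(&self.0);
    }
}

impl Decode for RawBytes {
    fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
        // Consume everything that is left in the input.
        let mut out = Vec::new();
        while let Ok(byte) = input.read_byte() {
            out.push(byte);
        }
        Ok(Self(out))
    }
}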
-#[derive(Encode, Decode, MaxEncodedLen, Default, PartialEq, Clone, Derivative, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, + Decode, + MaxEncodedLen, + Default, + PartialEq, + Clone, + Derivative, + TypeInfo, + Serialize, + Deserialize, +)] #[derivative(Debug)] pub struct CreateNftData { /// Key-value pairs used to describe the token as metadata - #[cfg_attr(feature = "serde1", serde(with = "bounded::vec_serde"))] + #[serde(with = "bounded::vec_serde")] #[derivative(Debug(format_with = "bounded::vec_debug"))] /// Properties that wil be assignet to created item. pub properties: CollectionPropertiesVec, } /// Data used to describe a Fungible token at creation. -#[derive(Encode, Decode, MaxEncodedLen, Default, Debug, Clone, PartialEq, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, + Decode, + MaxEncodedLen, + Default, + Debug, + Clone, + PartialEq, + TypeInfo, + Serialize, + Deserialize, +)] pub struct CreateFungibleData { /// Number of fungible coins minted pub value: u128, } /// Data used to describe a Refungible token at creation. -#[derive(Encode, Decode, MaxEncodedLen, Default, PartialEq, Clone, Derivative, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, + Decode, + MaxEncodedLen, + Default, + PartialEq, + Clone, + Derivative, + TypeInfo, + Serialize, + Deserialize, +)] #[derivative(Debug)] pub struct CreateReFungibleData { /// Number of pieces the RFT is split into pub pieces: u128, /// Key-value pairs used to describe the token as metadata - #[cfg_attr(feature = "serde1", serde(with = "bounded::vec_serde"))] + #[serde(with = "bounded::vec_serde")] #[derivative(Debug(format_with = "bounded::vec_debug"))] pub properties: CollectionPropertiesVec, } // TODO: remove this. -#[derive(Encode, Decode, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, +)] pub enum MetaUpdatePermission { ItemOwner, Admin, @@ -935,8 +1003,9 @@ pub enum MetaUpdatePermission { /// Enum holding data used for creation of all three item types. /// Unified data for create item. -#[derive(Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo, Serialize, Deserialize, +)] pub enum CreateItemData { /// Data for create NFT. NFT(CreateNftData), @@ -1025,8 +1094,9 @@ impl From for CreateItemData { } /// Token's address, dictated by its collection and token IDs. -#[derive(Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo, Serialize, Deserialize, +)] // todo possibly rename to be used generally as an address pair pub struct TokenChild { /// Token id. @@ -1037,8 +1107,9 @@ pub struct TokenChild { } /// Collection statistics. -#[derive(Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, MaxEncodedLen, PartialEq, Clone, Debug, TypeInfo, Serialize, Deserialize, +)] pub struct CollectionStats { /// Number of created items. 
pub created: u32, @@ -1060,10 +1131,9 @@ impl TypeInfo for PhantomType { fn type_info() -> scale_info::Type { use scale_info::{ - Type, Path, build::{FieldsBuilder, UnnamedFields}, form::MetaForm, - type_params, + type_params, Path, Type, }; Type::builder() .path(Path::new("up_data_structs", "PhantomType")) @@ -1092,8 +1162,18 @@ pub type PropertyKey = BoundedBytes>; pub type PropertyValue = BoundedBytes>; /// Property permission. -#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen, PartialEq, Clone, Default)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, + Decode, + TypeInfo, + Debug, + MaxEncodedLen, + PartialEq, + Clone, + Default, + Serialize, + Deserialize, +)] pub struct PropertyPermission { /// Permission to change the property and property permission. /// @@ -1119,15 +1199,16 @@ impl PropertyPermission { } /// Property is simpl key-value record. -#[derive(Encode, Decode, Debug, TypeInfo, Clone, PartialEq, MaxEncodedLen)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, Debug, TypeInfo, Clone, PartialEq, MaxEncodedLen, Serialize, Deserialize, +)] pub struct Property { /// Property key. - #[cfg_attr(feature = "serde1", serde(with = "bounded::vec_serde"))] + #[serde(with = "bounded::vec_serde")] pub key: PropertyKey, /// Property value. - #[cfg_attr(feature = "serde1", serde(with = "bounded::vec_serde"))] + #[serde(with = "bounded::vec_serde")] pub value: PropertyValue, } @@ -1138,8 +1219,9 @@ impl From for (PropertyKey, PropertyValue) { } /// Record for proprty key permission. -#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen, PartialEq, Clone)] -#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[derive( + Encode, Decode, TypeInfo, Debug, MaxEncodedLen, PartialEq, Clone, Serialize, Deserialize, +)] pub struct PropertyKeyPermission { /// Key. #[cfg_attr(feature = "serde1", serde(with = "bounded::vec_serde"))] @@ -1362,7 +1444,7 @@ fn slice_size(data: &[u8]) -> u32 { scoped_slice_size(PropertyScope::None, data) } fn scoped_slice_size(scope: PropertyScope, data: &[u8]) -> u32 { - use codec::Compact; + use parity_scale_codec::Compact; let prefix = scope.prefix(); >::encoded_size(&Compact(data.len() as u32 + prefix.len() as u32)) as u32 + data.len() as u32 diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index a4fa45f876..8ef914ca52 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -82,7 +82,7 @@ sp_api::decl_runtime_apis! { collection: CollectionId, token_id: TokenId, keys: Option>> - ) -> Result>; + ) -> Result>; /// Total number of tokens in collection. fn total_supply(collection: CollectionId) -> Result; @@ -117,7 +117,7 @@ sp_api::decl_runtime_apis! { fn collection_by_id(collection: CollectionId) -> Result>>; #[changed_in(3)] - fn collection_by_id(collection: CollectionId) -> Result>; + fn collection_by_id(collection: CollectionId) -> Result>; /// Get collection stats. fn collection_stats() -> Result; diff --git a/runtime/common/config/pallets/collator_selection.rs b/runtime/common/config/pallets/collator_selection.rs index 7d15498e31..ef3863a5e1 100644 --- a/runtime/common/config/pallets/collator_selection.rs +++ b/runtime/common/config/pallets/collator_selection.rs @@ -107,6 +107,7 @@ parameter_types! 
{ impl pallet_collator_selection::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; type Currency = Balances; // We allow root only to execute privileged collator selection operations. @@ -128,7 +129,6 @@ impl pallet_collator_selection::Config for Runtime { type ValidatorIdOf = pallet_collator_selection::IdentityCollator; type ValidatorRegistration = Session; type WeightInfo = pallet_collator_selection::weights::SubstrateWeight; - type LicenceBondIdentifier = LicenceBondIdentifier; type DesiredCollators = DesiredCollators; type LicenseBond = LicenseBond; type KickThreshold = KickThreshold; diff --git a/runtime/common/config/substrate.rs b/runtime/common/config/substrate.rs index c40a2e33e8..c0623bc0dd 100644 --- a/runtime/common/config/substrate.rs +++ b/runtime/common/config/substrate.rs @@ -76,10 +76,10 @@ impl frame_system::Config for Runtime { type BaseCallFilter = Everything; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; + /// The block type. + type Block = Block; /// The maximum length of a block (in bytes). type BlockLength = RuntimeBlockLength; - /// The index type for blocks. - type BlockNumber = BlockNumber; /// The weight of the overhead invoked on the block import process, independent of the extrinsics included in that block. type BlockWeights = RuntimeBlockWeights; /// The aggregated dispatch type that is available for extrinsics. @@ -92,10 +92,8 @@ impl frame_system::Config for Runtime { type Hash = Hash; /// The hashing algorithm used. type Hashing = BlakeTwo256; - /// The header type. - type Header = generic::Header; /// The index type for storing how many extrinsics an account has signed. - type Index = Index; + type Nonce = Nonce; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup; /// What to do if an account is fully reaped from the system. @@ -171,7 +169,7 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; - type HoldIdentifier = [u8; 16]; + type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = [u8; 16]; type MaxHolds = MaxHolds; type MaxFreezes = MaxFreezes; @@ -247,6 +245,7 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; + type AllowMultipleBlocksPerSlot = ConstBool; } impl pallet_utility::Config for Runtime { diff --git a/runtime/common/config/xcm/foreignassets.rs b/runtime/common/config/xcm/foreignassets.rs index bd66a27cf9..02f77f685a 100644 --- a/runtime/common/config/xcm/foreignassets.rs +++ b/runtime/common/config/xcm/foreignassets.rs @@ -36,17 +36,11 @@ parameter_types! 
{ pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } -pub struct AsInnerId(PhantomData<(AssetId, ConvertAssetId)>); -impl> - ConvertXcm for AsInnerId -where - AssetId: Borrow, - AssetId: TryAsForeign, - AssetIds: Borrow, +pub struct AsInnerId(PhantomData<(AssetId, ConvertAssetId)>); +impl> MaybeEquivalence + for AsInnerId { - fn convert_ref(id: impl Borrow) -> Result { - let id = id.borrow(); - + fn convert(id: &MultiLocation) -> Option { log::trace!( target: "xcm::AsInnerId::Convert", "AsInnerId {:?}", @@ -58,52 +52,46 @@ where let self_location = MultiLocation::new(1, X1(Parachain(ParachainInfo::get().into()))); if *id == parent { - return ConvertAssetId::convert_ref(AssetIds::NativeAssetId(NativeCurrency::Parent)); + return ConvertAssetId::convert(&AssetId::NativeAssetId(NativeCurrency::Parent)); } if *id == here || *id == self_location { - return ConvertAssetId::convert_ref(AssetIds::NativeAssetId(NativeCurrency::Here)); + return ConvertAssetId::convert(&AssetId::NativeAssetId(NativeCurrency::Here)); } match XcmForeignAssetIdMapping::::get_currency_id(*id) { - Some(AssetIds::ForeignAssetId(foreign_asset_id)) => { - ConvertAssetId::convert_ref(AssetIds::ForeignAssetId(foreign_asset_id)) + Some(AssetId::ForeignAssetId(foreign_asset_id)) => { + ConvertAssetId::convert(&AssetId::ForeignAssetId(foreign_asset_id)) } - _ => Err(()), + _ => None, } } - fn reverse_ref(what: impl Borrow) -> Result { + fn convert_back(asset_id: &AssetId) -> Option { log::trace!( target: "xcm::AsInnerId::Reverse", "AsInnerId", ); - let asset_id = what.borrow(); - let parent_id = - ConvertAssetId::convert_ref(AssetIds::NativeAssetId(NativeCurrency::Parent)).unwrap(); + ConvertAssetId::convert(&AssetId::NativeAssetId(NativeCurrency::Parent)).unwrap(); let here_id = - ConvertAssetId::convert_ref(AssetIds::NativeAssetId(NativeCurrency::Here)).unwrap(); + ConvertAssetId::convert(&AssetId::NativeAssetId(NativeCurrency::Here)).unwrap(); if asset_id.clone() == parent_id { - return Ok(MultiLocation::parent()); + return Some(MultiLocation::parent()); } if asset_id.clone() == here_id { - return Ok(MultiLocation::new( + return Some(MultiLocation::new( 1, X1(Parachain(ParachainInfo::get().into())), )); } - match >::try_as_foreign(asset_id.clone()) { - Some(fid) => match XcmForeignAssetIdMapping::::get_multi_location(fid) { - Some(location) => Ok(location), - None => Err(()), - }, - None => Err(()), - } + let fid = + >::try_as_foreign(asset_id.clone())?; + XcmForeignAssetIdMapping::::get_multi_location(fid) } } @@ -112,7 +100,7 @@ pub type FungiblesTransactor = FungiblesAdapter< // Use this fungibles implementation: ForeignAssets, // Use this currency when it is a fungible asset matching the given location or name: - ConvertedConcreteId, JustTry>, + ConvertedConcreteId, JustTry>, // Convert an XCM MultiLocation into a local account id: LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): @@ -154,7 +142,7 @@ impl TransactAsset for AssetTransactor { what: &MultiAsset, who: &MultiLocation, maybe_context: Option<&XcmContext>, - ) -> Result { + ) -> Result { FungiblesTransactor::withdraw_asset(what, who, maybe_context) } @@ -163,7 +151,7 @@ impl TransactAsset for AssetTransactor { from: &MultiLocation, to: &MultiLocation, context: &XcmContext, - ) -> Result { + ) -> Result { FungiblesTransactor::internal_transfer_asset(what, from, to, context) } } @@ -179,15 +167,15 @@ pub type Trader = FreeForAll< >; pub struct CurrencyIdConvert; -impl Convert> for CurrencyIdConvert 
{ - fn convert(id: AssetIds) -> Option { +impl Convert> for CurrencyIdConvert { + fn convert(id: AssetId) -> Option { match id { - AssetIds::NativeAssetId(NativeCurrency::Here) => Some(MultiLocation::new( + AssetId::NativeAssetId(NativeCurrency::Here) => Some(MultiLocation::new( 1, X1(Parachain(ParachainInfo::get().into())), )), - AssetIds::NativeAssetId(NativeCurrency::Parent) => Some(MultiLocation::parent()), - AssetIds::ForeignAssetId(foreign_asset_id) => { + AssetId::NativeAssetId(NativeCurrency::Parent) => Some(MultiLocation::parent()), + AssetId::ForeignAssetId(foreign_asset_id) => { XcmForeignAssetIdMapping::::get_multi_location(foreign_asset_id) } } @@ -199,11 +187,11 @@ impl Convert> for CurrencyIdConvert { if location == MultiLocation::here() || location == MultiLocation::new(1, X1(Parachain(ParachainInfo::get().into()))) { - return Some(AssetIds::NativeAssetId(NativeCurrency::Here)); + return Some(AssetId::NativeAssetId(NativeCurrency::Here)); } if location == MultiLocation::parent() { - return Some(AssetIds::NativeAssetId(NativeCurrency::Parent)); + return Some(AssetId::NativeAssetId(NativeCurrency::Parent)); } if let Some(currency_id) = XcmForeignAssetIdMapping::::get_currency_id(location) { diff --git a/runtime/common/config/xcm/mod.rs b/runtime/common/config/xcm/mod.rs index 25eb8f2772..130355f6cb 100644 --- a/runtime/common/config/xcm/mod.rs +++ b/runtime/common/config/xcm/mod.rs @@ -153,10 +153,10 @@ where origin: &MultiLocation, message: &mut [Instruction], max_weight: Weight, - weight_credit: &mut Weight, + properties: &mut Properties, ) -> Result<(), ProcessMessageError> { Deny::try_pass(origin, message)?; - Allow::should_execute(origin, message, max_weight, weight_credit) + Allow::should_execute(origin, message, max_weight, properties) } } @@ -211,7 +211,7 @@ impl Contains for XcmCallFilter { } pub struct XcmExecutorConfig(PhantomData); -impl xcm_executor::Config for XcmExecutorConfig +impl staging_xcm_executor::Config for XcmExecutorConfig where T: pallet_configuration::Config, { @@ -240,6 +240,7 @@ where type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = XcmCallFilter; + type Aliasers = Nothing; } #[cfg(feature = "runtime-benchmarks")] diff --git a/runtime/common/config/xcm/nativeassets.rs b/runtime/common/config/xcm/nativeassets.rs index 6ea43b41c0..c721eb4385 100644 --- a/runtime/common/config/xcm/nativeassets.rs +++ b/runtime/common/config/xcm/nativeassets.rs @@ -106,7 +106,12 @@ impl< Self(Weight::from_parts(0, 0), Zero::zero(), PhantomData) } - fn buy_weight(&mut self, _weight: Weight, payment: Assets) -> Result { + fn buy_weight( + &mut self, + _weight: Weight, + payment: Assets, + _xcm: &XcmContext, + ) -> Result { Ok(payment) } } diff --git a/runtime/common/construct_runtime.rs b/runtime/common/construct_runtime.rs index f059ccabb1..276529bb65 100644 --- a/runtime/common/construct_runtime.rs +++ b/runtime/common/construct_runtime.rs @@ -19,11 +19,7 @@ macro_rules! construct_runtime { () => { frame_support::construct_runtime! { - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic - { + pub enum Runtime { System: frame_system = 0, StateTrieMigration: pallet_state_trie_migration = 1, diff --git a/runtime/common/mod.rs b/runtime/common/mod.rs index 939cc8455c..3a5d4040f4 100644 --- a/runtime/common/mod.rs +++ b/runtime/common/mod.rs @@ -63,12 +63,15 @@ macro_rules! unsupported { /// The address format for describing accounts. 
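// The `AsInnerId` hunks above replace the removed reversible `Convert` trait
// (`convert_ref`/`reverse_ref`, returning `Result<_, ()>`) with `MaybeEquivalence`,
// whose two methods return `Option` instead. A minimal sketch of the new trait shape
// using plain integer types, assuming the trait is imported from `sp_runtime::traits`
// (the converter below is illustrative and not part of this patch):
use sp_runtime::traits::MaybeEquivalence;

pub struct NarrowId;

impl MaybeEquivalence<u64, u32> for NarrowId {
    // "Forward" direction: succeeds only when the value fits into the target type.
    fn convert(wide: &u64) -> Option<u32> {
        u32::try_from(*wide).ok()
    }

    // "Backward" direction: widening always succeeds.
    fn convert_back(narrow: &u32) -> Option<u64> {
        Some(u64::from(*narrow))
    }
}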
pub type Address = sp_runtime::MultiAddress; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock; +/// Frontier wrapped extrinsic +pub type UncheckedExtrinsic = + fp_self_contained::UncheckedExtrinsic; +/// Header type. +pub type Header = generic::Header; +/// Block type. +pub type Block = generic::Block; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; @@ -103,14 +106,6 @@ pub type SignedExtra = ( pallet_ethereum::FakeTransactionFinalizer, ); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - fp_self_contained::UncheckedExtrinsic; - -/// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = - fp_self_contained::CheckedExtrinsic; - /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, diff --git a/runtime/common/sponsoring.rs b/runtime/common/sponsoring.rs index a5ceb371c6..74f894040a 100644 --- a/runtime/common/sponsoring.rs +++ b/runtime/common/sponsoring.rs @@ -79,7 +79,7 @@ pub fn withdraw_set_token_property( return None; } - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; let limit = collection.limits.sponsored_data_rate_limit()?; if let Some(last_tx_block) = TokenPropertyBasket::::get(collection.id, item_id) { @@ -123,7 +123,7 @@ pub fn withdraw_transfer( } // sponsor timeout - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; let limit = collection .limits .sponsor_transfer_timeout(match collection.mode { @@ -169,7 +169,7 @@ pub fn withdraw_create_item( properties: &CreateItemData, ) -> Option<()> { // sponsor timeout - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; let limit = collection .limits .sponsor_transfer_timeout(match properties { @@ -195,7 +195,7 @@ pub fn withdraw_approve( item_id: &TokenId, ) -> Option<()> { // sponsor timeout - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; let limit = collection.limits.sponsor_approve_timeout(); let last_tx_block = match collection.mode { @@ -307,7 +307,7 @@ where pub trait SponsorshipPredict { fn predict(collection: CollectionId, account: T::CrossAccountId, token: TokenId) -> Option where - u64: From<::BlockNumber>; + u64: From>; } pub struct UniqueSponsorshipPredict(PhantomData); @@ -315,13 +315,13 @@ pub struct UniqueSponsorshipPredict(PhantomData); impl SponsorshipPredict for UniqueSponsorshipPredict { fn predict(collection_id: CollectionId, who: T::CrossAccountId, token: TokenId) -> Option where - u64: From<::BlockNumber>, + u64: From>, { let collection = >::try_get(collection_id).ok()?; let _ = collection.sponsorship.sponsor()?; // sponsor timeout - let block_number = >::block_number() as T::BlockNumber; + let block_number = >::block_number() as BlockNumberFor; let limit = collection .limits .sponsor_transfer_timeout(match collection.mode { diff --git a/runtime/common/tests/mod.rs b/runtime/common/tests/mod.rs index da9cb73104..fb07dad238 100644 --- a/runtime/common/tests/mod.rs +++ b/runtime/common/tests/mod.rs @@ -51,8 +51,8 @@ fn last_events(n: usize) -> Vec { fn new_test_ext(balances: Vec<(AccountId, Balance)>) -> 
sp_io::TestExternalities { let mut storage = make_basic_storage(); - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut storage) + pallet_balances::BuildGenesisConfig:: { balances } + .build_storage(&mut storage) .unwrap(); let mut ext = sp_io::TestExternalities::new(storage); @@ -94,13 +94,14 @@ fn make_basic_storage() -> Storage { .map(|acc| get_account_id_from_seed::(acc)) .collect::>(); - let cfg = GenesisConfig { + let cfg = BuildGenesisConfig { collator_selection: CollatorSelectionConfig { invulnerables }, session: SessionConfig { keys }, parachain_info: ParachainInfoConfig { parachain_id: PARA_ID.into(), + ..Default::default() }, - ..GenesisConfig::default() + ..Default::default() }; cfg.build_storage().unwrap() @@ -110,7 +111,7 @@ fn make_basic_storage() -> Storage { fn make_basic_storage() -> Storage { use crate::AuraConfig; - let cfg = GenesisConfig { + let cfg = BuildGenesisConfig { aura: AuraConfig { authorities: vec![ get_from_seed::("Alice"), @@ -119,8 +120,9 @@ fn make_basic_storage() -> Storage { }, parachain_info: ParachainInfoConfig { parachain_id: PARA_ID.into(), + ..Default::default() }, - ..GenesisConfig::default() + ..Default::default() }; cfg.build_storage().unwrap().into() diff --git a/runtime/tests/src/lib.rs b/runtime/tests/src/lib.rs index 0ec974d0f1..30e08e9203 100644 --- a/runtime/tests/src/lib.rs +++ b/runtime/tests/src/lib.rs @@ -57,23 +57,19 @@ mod tests; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { + pub enum Test { System: frame_system, Timestamp: pallet_timestamp, - Unique: pallet_unique::{Pallet, Call, Storage}, - Balances: pallet_balances::{Pallet, Call, Storage, Event}, - Common: pallet_common::{Pallet, Storage, Event}, - Fungible: pallet_fungible::{Pallet, Storage}, - Refungible: pallet_refungible::{Pallet, Storage}, - Nonfungible: pallet_nonfungible::{Pallet, Storage}, - Structure: pallet_structure::{Pallet, Storage, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - Ethereum: pallet_ethereum::{Pallet, Config, Call, Storage, Event, Origin}, - EVM: pallet_evm::{Pallet, Config, Call, Storage, Event}, + Unique: pallet_unique, + Balances: pallet_balances, + Common: pallet_common, + Fungible: pallet_fungible, + Refungible: pallet_refungible, + Nonfungible: pallet_nonfungible, + Structure: pallet_structure, + TransactionPayment: pallet_transaction_payment, + Ethereum: pallet_ethereum, + EVM: pallet_evm, } ); @@ -90,13 +86,11 @@ impl system::Config for Test { type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; @@ -127,7 +121,6 @@ impl pallet_balances::Config for Test { type MaxFreezes = MaxLocks; type FreezeIdentifier = [u8; 8]; type MaxHolds = MaxLocks; - type HoldIdentifier = [u8; 8]; } parameter_types! 
{ @@ -242,7 +235,6 @@ impl pallet_evm::Config for Test { type OnChargeTransaction = (); type FindAuthor = (); type BlockHashMapping = SubstrateBlockHashMapping; - type TransactionValidityHack = (); type Timestamp = Timestamp; type GasLimitPovSizeRatio = ConstU64<0>; } From 4dd148686c6b73c7ecbcd56ec738899f8a1c8a41 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:28 +0200 Subject: [PATCH 095/143] refactor: reorganize imports --- .rustfmt.toml | 3 +- client/rpc/src/lib.rs | 33 +++--- client/rpc/src/pov_estimate.rs | 32 ++---- crates/struct-versioning/src/lib.rs | 7 +- node/cli/src/chain_spec.rs | 31 +++--- node/cli/src/cli.rs | 4 +- node/cli/src/command.rs | 36 +++--- node/cli/src/lib.rs | 18 --- node/cli/src/main.rs | 1 + node/cli/src/service.rs | 105 +++++++++--------- pallets/app-promotion/src/benchmarking.rs | 15 ++- pallets/app-promotion/src/lib.rs | 40 +++---- pallets/app-promotion/src/types.rs | 13 +-- pallets/balances-adapter/src/common.rs | 4 +- pallets/balances-adapter/src/erc.rs | 7 +- pallets/balances-adapter/src/lib.rs | 10 +- .../collator-selection/src/benchmarking.rs | 9 +- pallets/collator-selection/src/lib.rs | 15 +-- pallets/collator-selection/src/mock.rs | 5 +- pallets/collator-selection/src/tests.rs | 6 +- pallets/common/src/benchmarking.rs | 28 ++--- pallets/common/src/dispatch.rs | 6 +- pallets/common/src/erc.rs | 19 ++-- pallets/common/src/eth.rs | 11 +- pallets/common/src/helpers.rs | 2 +- pallets/common/src/lib.rs | 53 ++++----- pallets/configuration/src/benchmarking.rs | 5 +- pallets/configuration/src/lib.rs | 27 ++--- .../evm-coder-substrate/procedural/src/lib.rs | 6 +- pallets/evm-coder-substrate/src/execution.rs | 3 +- pallets/evm-coder-substrate/src/lib.rs | 33 +++--- pallets/evm-contract-helpers/src/eth.rs | 25 +++-- pallets/evm-contract-helpers/src/lib.rs | 15 +-- pallets/evm-migration/src/benchmarking.rs | 5 +- pallets/evm-migration/src/lib.rs | 10 +- pallets/evm-transaction-payment/src/lib.rs | 19 +++- pallets/foreign-assets/src/benchmarking.rs | 13 +-- pallets/foreign-assets/src/impl_fungibles.rs | 13 +-- pallets/foreign-assets/src/lib.rs | 21 ++-- pallets/fungible/src/benchmarking.rs | 13 +-- pallets/fungible/src/common.rs | 20 ++-- pallets/fungible/src/erc.rs | 27 ++--- pallets/fungible/src/lib.rs | 33 +++--- pallets/gov-origins/src/lib.rs | 1 - pallets/identity/src/benchmarking.rs | 8 +- pallets/identity/src/lib.rs | 14 +-- pallets/identity/src/tests.rs | 9 +- pallets/identity/src/types.rs | 5 +- pallets/inflation/src/benchmarking.rs | 6 +- pallets/inflation/src/lib.rs | 18 ++- pallets/inflation/src/tests.rs | 8 +- pallets/maintenance/src/benchmarking.rs | 10 +- pallets/maintenance/src/lib.rs | 10 +- pallets/nonfungible/src/benchmarking.rs | 12 +- pallets/nonfungible/src/common.rs | 18 +-- pallets/nonfungible/src/erc.rs | 38 +++---- pallets/refungible/src/benchmarking.rs | 14 +-- pallets/refungible/src/common.rs | 27 ++--- pallets/refungible/src/erc.rs | 23 ++-- pallets/refungible/src/erc_token.rs | 18 +-- pallets/refungible/src/lib.rs | 31 +++--- pallets/structure/src/benchmarking.rs | 14 +-- pallets/structure/src/lib.rs | 24 ++-- pallets/unique/src/benchmarking.rs | 19 ++-- pallets/unique/src/eth/mod.rs | 21 ++-- pallets/unique/src/lib.rs | 32 +++--- primitives/app_promotion_rpc/src/lib.rs | 6 +- primitives/common/src/constants.rs | 9 +- primitives/common/src/types.rs | 4 +- primitives/data-structs/src/bounded.rs | 8 +- primitives/data-structs/src/lib.rs | 23 ++-- primitives/data-structs/src/mapping.rs | 2 +- 
primitives/data-structs/src/migration.rs | 6 +- primitives/pov-estimate-rpc/src/lib.rs | 4 +- primitives/rpc/src/lib.rs | 11 +- runtime/common/config/ethereum.rs | 22 ++-- .../common/config/governance/fellowship.rs | 7 +- runtime/common/config/governance/mod.rs | 26 +++-- runtime/common/config/orml.rs | 26 ++--- .../common/config/pallets/app_promotion.rs | 12 +- .../config/pallets/collator_selection.rs | 24 ++-- .../common/config/pallets/foreign_asset.rs | 3 +- runtime/common/config/pallets/mod.rs | 31 +++--- runtime/common/config/pallets/preimage.rs | 3 +- runtime/common/config/pallets/scheduler.rs | 18 +-- runtime/common/config/parachain.rs | 5 +- runtime/common/config/sponsoring.rs | 6 +- runtime/common/config/substrate.rs | 29 ++--- runtime/common/config/test_pallets.rs | 2 +- runtime/common/config/xcm/foreignassets.rs | 25 ++--- runtime/common/config/xcm/mod.rs | 37 +++--- runtime/common/config/xcm/nativeassets.rs | 31 +++--- runtime/common/dispatch.rs | 31 +++--- runtime/common/ethereum/precompiles/mod.rs | 7 +- .../common/ethereum/precompiles/sr25519.rs | 3 +- .../common/ethereum/precompiles/utils/data.rs | 8 +- .../precompiles/utils/macro/src/lib.rs | 3 +- .../common/ethereum/precompiles/utils/mod.rs | 3 +- .../common/ethereum/self_contained_call.rs | 7 +- runtime/common/ethereum/sponsoring.rs | 31 +++--- .../common/ethereum/sponsoring/refungible.rs | 13 +-- runtime/common/identity.rs | 13 +-- runtime/common/instance.rs | 6 +- runtime/common/maintenance.rs | 10 +- runtime/common/mod.rs | 20 ++-- runtime/common/scheduler.rs | 17 ++- runtime/common/sponsoring.rs | 30 ++--- runtime/common/tests/mod.rs | 11 +- runtime/common/tests/xcm.rs | 13 ++- runtime/common/weights/mod.rs | 17 +-- runtime/opal/src/lib.rs | 7 +- runtime/opal/src/xcm_barrier.rs | 2 +- runtime/quartz/src/lib.rs | 7 +- runtime/quartz/src/xcm_barrier.rs | 4 +- runtime/tests/src/lib.rs | 25 ++--- runtime/tests/src/tests.rs | 28 +++-- runtime/unique/src/lib.rs | 7 +- runtime/unique/src/xcm_barrier.rs | 4 +- test-pallets/utils/src/lib.rs | 7 +- 119 files changed, 938 insertions(+), 957 deletions(-) delete mode 100644 node/cli/src/lib.rs diff --git a/.rustfmt.toml b/.rustfmt.toml index 3f85f360c0..f064c2802b 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,2 +1,3 @@ +group_imports = "stdexternalcrate" hard_tabs = true -reorder_imports = false +imports_granularity = "crate" diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index b861f186e5..fd117b56ea 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -17,23 +17,19 @@ // Original License use std::sync::Arc; -use codec::Decode; -use jsonrpsee::{ - core::{RpcResult as Result}, - proc_macros::rpc, -}; use anyhow::anyhow; +use app_promotion_rpc::AppPromotionApi as AppPromotionRuntimeApi; +pub use app_promotion_unique_rpc::AppPromotionApiServer; +use jsonrpsee::{core::RpcResult as Result, proc_macros::rpc}; +use parity_scale_codec::Decode; +use sp_api::{ApiExt, BlockT, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; use sp_runtime::traits::{AtLeast32BitUnsigned, Member}; use up_data_structs::{ - RpcCollection, CollectionId, CollectionStats, CollectionLimits, TokenId, Property, - PropertyKeyPermission, TokenData, TokenChild, + CollectionId, CollectionLimits, CollectionStats, Property, PropertyKeyPermission, + RpcCollection, TokenChild, TokenData, TokenId, }; -use sp_api::{BlockT, ProvideRuntimeApi, ApiExt}; -use sp_blockchain::HeaderBackend; use up_rpc::UniqueApi as UniqueRuntimeApi; -use app_promotion_rpc::AppPromotionApi as 
AppPromotionRuntimeApi; - -pub use app_promotion_unique_rpc::AppPromotionApiServer; #[cfg(feature = "pov-estimate")] pub mod pov_estimate; @@ -549,16 +545,16 @@ fn string_keys_to_bytes_keys(keys: Option>) -> Option>> keys.map(|keys| keys.into_iter().map(|key| key.into_bytes()).collect()) } -fn decode_collection_from_bytes( +fn decode_collection_from_bytes( bytes: &[u8], -) -> core::result::Result { - let mut reader = codec::IoReader(bytes); +) -> core::result::Result { + let mut reader = parity_scale_codec::IoReader(bytes); T::decode(&mut reader) } fn detect_type_and_decode_collection( bytes: &[u8], -) -> core::result::Result, codec::Error> { +) -> core::result::Result, parity_scale_codec::Error> { use up_data_structs::{CollectionVersion1, RpcCollectionVersion1}; decode_collection_from_bytes::>(bytes) @@ -574,11 +570,12 @@ fn detect_type_and_decode_collection( #[cfg(test)] mod tests { - use super::*; - use codec::IoReader; use hex_literal::hex; + use parity_scale_codec::IoReader; use up_data_structs::{CollectionVersion1, RawEncoded}; + use super::*; + const ENCODED_COLLECTION_V1: [u8; 180] = hex!("aab94a1ee784bc17f68d76d4d48d736916ca6ff6315b8c1fa1175726c8345a390000285000720069006d00610020004c00690076006500d04500730065006d00700069006f00200064006900200063007200650061007a0069006f006e006500200064006900200075006e00610020006e0075006f0076006100200063006f006c006c0065007a0069006f006e00650020006400690020004e004600540021000c464e5400000000000000000000000000000000"); const ENCODED_RPC_COLLECTION_V2: [u8; 618] = hex!("d00dcc24bf66750d3809aa26884b930ec8a3094d6f6f19fdc62020b2fbec013400604d0069006e007400460065007300740020002d002000460075006e006e007900200061006e0069006d0061006c0073008c430072006f00730073006f0076006500720020006200650074007700650065006e00200061006e0069006d0061006c00730020002d00200066006f0072002000660075006e00104d46464100000000000000000000010001000100000004385f6f6c645f636f6e7374446174610001000c5c5f6f6c645f636f6e73744f6e436861696e536368656d6139047b226e6573746564223a7b226f6e436861696e4d65746144617461223a7b226e6573746564223a7b224e46544d657461223a7b226669656c6473223a7b22697066734a736f6e223a7b226964223a312c2272756c65223a227265717569726564222c2274797065223a22737472696e67227d2c2248656164223a7b226964223a322c2272756c65223a227265717569726564222c2274797065223a22737472696e67227d2c22426f6479223a7b226964223a332c2272756c65223a227265717569726564222c2274797065223a22737472696e67227d2c225461696c223a7b226964223a342c2272756c65223a227265717569726564222c2274797065223a22737472696e67227d7d7d7d7d7d7d485f6f6c645f736368656d6156657273696f6e18556e69717565685f6f6c645f7661726961626c654f6e436861696e536368656d6111017b22636f6c6c656374696f6e436f766572223a22516d53557a7139354c357a556777795a584d3731576a3762786b36557048515468633162536965347766706e5435227d000000"); diff --git a/client/rpc/src/pov_estimate.rs b/client/rpc/src/pov_estimate.rs index f97ce1c78a..2697ed76f7 100644 --- a/client/rpc/src/pov_estimate.rs +++ b/client/rpc/src/pov_estimate.rs @@ -16,39 +16,31 @@ use std::sync::Arc; -use codec::{Encode, Decode}; -use sp_externalities::Extensions; - -use up_pov_estimate_rpc::{PovEstimateApi as PovEstimateRuntimeApi}; -use up_common::types::opaque::RuntimeId; - -use sc_service::{NativeExecutionDispatch, config::ExecutionStrategy}; -use sp_state_machine::{StateMachine, TrieBackendBuilder}; -use trie_db::{Trie, TrieDBBuilder}; - -use jsonrpsee::{core::RpcResult as Result, proc_macros::rpc}; use anyhow::anyhow; - +use jsonrpsee::{core::RpcResult as Result, proc_macros::rpc}; +use parity_scale_codec::{Decode, Encode}; use 
sc_client_api::backend::Backend; +use sc_executor::NativeElseWasmExecutor; +use sc_rpc_api::DenyUnsafe; +use sc_service::{config::ExecutionStrategy, NativeExecutionDispatch}; +use sp_api::{AsTrieBackend, BlockId, BlockT, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::{ - Bytes, offchain::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, testing::TaskExecutor, traits::TaskExecutorExt, + Bytes, }; +use sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt}; -use sp_api::{AsTrieBackend, BlockId, BlockT, ProvideRuntimeApi}; - -use sc_executor::NativeElseWasmExecutor; -use sc_rpc_api::DenyUnsafe; - use sp_runtime::traits::Header; - -use up_pov_estimate_rpc::{PovInfo, TrieKeyValue}; +use sp_state_machine::{StateMachine, TrieBackendBuilder}; +use trie_db::{Trie, TrieDBBuilder}; +use up_common::types::opaque::RuntimeId; +use up_pov_estimate_rpc::{PovEstimateApi as PovEstimateRuntimeApi, PovInfo, TrieKeyValue}; use crate::define_struct_for_server_api; diff --git a/crates/struct-versioning/src/lib.rs b/crates/struct-versioning/src/lib.rs index c0d24263d6..3ae06fc503 100644 --- a/crates/struct-versioning/src/lib.rs +++ b/crates/struct-versioning/src/lib.rs @@ -17,13 +17,12 @@ #![doc = include_str!("../README.md")] use proc_macro::TokenStream; -use quote::format_ident; +use quote::{format_ident, quote}; use syn::{ - parse::{Parse, ParseStream}, - Token, LitInt, parse_macro_input, ItemStruct, Error, Fields, Result, Field, Expr, parenthesized, + parse::{Parse, ParseStream}, + parse_macro_input, Error, Expr, Field, Fields, ItemStruct, LitInt, Result, Token, }; -use quote::quote; mod kw { syn::custom_keyword!(version); diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index 5d5e0c4fd3..3efd24a42a 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -14,36 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; -use sc_service::ChainType; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; use std::collections::BTreeMap; +#[cfg(all(not(feature = "unique-runtime"), not(feature = "quartz-runtime")))] +pub use opal_runtime as default_runtime; +#[cfg(all(not(feature = "unique-runtime"), feature = "quartz-runtime"))] +pub use quartz_runtime as default_runtime; +use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; +use sc_service::ChainType; use serde::{Deserialize, Serialize}; use serde_json::map::Map; - -use up_common::types::opaque::*; - +use sp_core::{sr25519, Pair, Public}; +use sp_runtime::traits::{IdentifyAccount, Verify}; #[cfg(feature = "unique-runtime")] pub use unique_runtime as default_runtime; - -#[cfg(all(not(feature = "unique-runtime"), feature = "quartz-runtime"))] -pub use quartz_runtime as default_runtime; - -#[cfg(all(not(feature = "unique-runtime"), not(feature = "quartz-runtime")))] -pub use opal_runtime as default_runtime; +use up_common::types::opaque::*; /// The `ChainSpec` parameterized for the unique runtime. #[cfg(feature = "unique-runtime")] -pub type UniqueChainSpec = sc_service::GenericChainSpec; +pub type UniqueChainSpec = + sc_service::GenericChainSpec; /// The `ChainSpec` parameterized for the quartz runtime. 
#[cfg(feature = "quartz-runtime")] -pub type QuartzChainSpec = sc_service::GenericChainSpec; +pub type QuartzChainSpec = + sc_service::GenericChainSpec; /// The `ChainSpec` parameterized for the opal runtime. -pub type OpalChainSpec = sc_service::GenericChainSpec; +pub type OpalChainSpec = + sc_service::GenericChainSpec; #[cfg(feature = "unique-runtime")] pub type DefaultChainSpec = UniqueChainSpec; diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs index 3a9b612cb1..e3806cc0f9 100644 --- a/node/cli/src/cli.rs +++ b/node/cli/src/cli.rs @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use crate::chain_spec; use std::path::PathBuf; + use clap::Parser; +use crate::chain_spec; + /// Sub-commands supported by the collator. #[derive(Debug, Parser)] pub enum Subcommand { diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 041331aa76..22a34fe61b 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -32,28 +32,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{ - chain_spec::{self, RuntimeIdentification, ServiceId, ServiceIdentification}, - cli::{Cli, RelayChainCli, Subcommand}, - service::{new_partial, start_node, start_dev_node}, -}; -#[cfg(feature = "runtime-benchmarks")] -use crate::chain_spec::default_runtime; - -#[cfg(feature = "unique-runtime")] -use crate::service::UniqueRuntimeExecutor; - -#[cfg(feature = "quartz-runtime")] -use crate::service::QuartzRuntimeExecutor; - -use crate::service::OpalRuntimeExecutor; - -#[cfg(feature = "runtime-benchmarks")] -use crate::service::DefaultRuntimeExecutor; +use std::time::Duration; use codec::Encode; -use cumulus_primitives_core::ParaId; use cumulus_client_cli::generate_genesis_block; +use cumulus_primitives_core::ParaId; use log::{debug, info}; use sc_cli::{ ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, @@ -62,9 +45,22 @@ use sc_cli::{ use sc_service::config::{BasePath, PrometheusConfig}; use sp_core::hexdisplay::HexDisplay; use sp_runtime::traits::{AccountIdConversion, Block as BlockT}; - use up_common::types::opaque::{Block, RuntimeId}; +#[cfg(feature = "runtime-benchmarks")] +use crate::chain_spec::default_runtime; +#[cfg(feature = "runtime-benchmarks")] +use crate::service::DefaultRuntimeExecutor; +#[cfg(feature = "quartz-runtime")] +use crate::service::QuartzRuntimeExecutor; +#[cfg(feature = "unique-runtime")] +use crate::service::UniqueRuntimeExecutor; +use crate::{ + chain_spec::{self, RuntimeIdentification, ServiceId, ServiceIdentification}, + cli::{Cli, RelayChainCli, Subcommand}, + service::{new_partial, start_dev_node, start_node, OpalRuntimeExecutor}, +}; + macro_rules! no_runtime_err { ($runtime_id:expr) => { format!( diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs deleted file mode 100644 index aac2074691..0000000000 --- a/node/cli/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -pub mod chain_spec; -pub mod service; diff --git a/node/cli/src/main.rs b/node/cli/src/main.rs index ec0cfb72ea..9ea1c705b3 100644 --- a/node/cli/src/main.rs +++ b/node/cli/src/main.rs @@ -19,6 +19,7 @@ mod chain_spec; mod service; mod cli; mod command; +mod rpc; fn main() -> sc_cli::Result<()> { command::run() diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index f48f011416..7c6c2a9620 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -15,73 +15,72 @@ // along with Unique Network. If not, see . // std -use std::sync::Arc; -use std::sync::Mutex; -use std::collections::BTreeMap; -use std::time::Duration; -use std::pin::Pin; -use fc_mapping_sync::EthereumBlockNotificationSinks; -use fc_rpc::EthBlockDataCacheTask; -use fc_rpc::EthTask; -use fc_rpc_core::types::FeeHistoryCache; -use futures::{ - Stream, StreamExt, - stream::select, - task::{Context, Poll}, +use std::{ + collections::BTreeMap, + marker::PhantomData, + pin::Pin, + sync::{Arc, Mutex}, + time::Duration, }; -use sc_rpc::SubscriptionTaskExecutor; -use sp_keystore::KeystorePtr; -use tokio::time::Interval; -use jsonrpsee::RpcModule; -use serde::{Serialize, Deserialize}; - -// Cumulus Imports -use cumulus_client_consensus_aura::{AuraConsensus, BuildAuraConsensusParams, SlotProportion}; -use cumulus_client_consensus_common::{ - ParachainConsensus, ParachainBlockImport as TParachainBlockImport, +use cumulus_client_cli::CollatorOptions; +use cumulus_client_collator::service::CollatorService; +#[cfg(not(feature = "lookahead"))] +use cumulus_client_consensus_aura::collators::basic::{ + run as run_aura, Params as BuildAuraConsensusParams, }; +#[cfg(feature = "lookahead")] +use cumulus_client_consensus_aura::collators::lookahead::{ + run as run_aura, Params as BuildAuraConsensusParams, +}; +use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; +use cumulus_client_consensus_proposer::Proposer; +use cumulus_client_network::RequireSecondedInBlockAnnounce; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, DARecoveryProfile, + StartRelayChainTasksParams, }; -use cumulus_client_cli::CollatorOptions; -use cumulus_client_network::BlockAnnounceValidator; use cumulus_primitives_core::ParaId; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; -use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; - -// Substrate Imports -use sp_api::{BlockT, HeaderT, ProvideRuntimeApi, StateBackend}; -use sc_executor::NativeElseWasmExecutor; -use sc_executor::NativeExecutionDispatch; +use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; +use fc_mapping_sync::{kv::MappingSyncWorker, EthereumBlockNotificationSinks, SyncStrategy}; +use fc_rpc::{ + frontier_backend_client::SystemAccountId32StorageOverride, EthBlockDataCacheTask, EthConfig, + EthTask, OverrideHandle, RuntimeApiStorageOverride, SchemaV1Override, SchemaV2Override, + SchemaV3Override, 
StorageOverride, +}; +use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; +use fp_rpc::EthereumRuntimeRPCApi; +use fp_storage::EthereumStorageSchema; +use futures::{ + stream::select, + task::{Context, Poll}, + Stream, StreamExt, +}; +use jsonrpsee::RpcModule; +use polkadot_service::CollatorPair; +use sc_client_api::{AuxStore, Backend, BlockOf, BlockchainEvents, StorageProvider}; +use sc_consensus::ImportQueue; +use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; use sc_network::NetworkBlock; use sc_network_sync::SyncingService; +use sc_rpc::SubscriptionTaskExecutor; use sc_service::{Configuration, PartialComponents, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; +use serde::{Deserialize, Serialize}; +use sp_api::{ProvideRuntimeApi, StateBackend}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus_aura::sr25519::AuthorityPair as AuraAuthorityPair; +use sp_keystore::KeystorePtr; use sp_runtime::traits::BlakeTwo256; use substrate_prometheus_endpoint::Registry; -use sc_client_api::{BlockchainEvents, BlockOf, Backend, AuxStore, StorageProvider}; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; -use sc_consensus::ImportQueue; -use sp_core::H256; -use sp_block_builder::BlockBuilder; - -use polkadot_service::CollatorPair; +use tokio::time::Interval; +use up_common::types::{opaque::*, Nonce}; -// Frontier Imports -use fc_rpc_core::types::FilterPool; -use fc_mapping_sync::{kv::MappingSyncWorker, SyncStrategy}; -use fc_rpc::{ - StorageOverride, OverrideHandle, SchemaV1Override, SchemaV2Override, SchemaV3Override, - RuntimeApiStorageOverride, +use crate::{ + chain_spec::RuntimeIdentification, + rpc::{create_eth, create_full, EthDeps, FullDeps}, }; -use fp_rpc::EthereumRuntimeRPCApi; -use fp_storage::EthereumStorageSchema; - -use up_common::types::opaque::*; - -use crate::chain_spec::RuntimeIdentification; /// Unique native executor instance. 
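The service.rs import hunk above gates two collator entry points behind a `lookahead` feature and renames both to `run_aura`, so the rest of the file does not care which implementation is compiled in. The same aliasing trick in isolation, with toy functions rather than the real cumulus APIs (whose signatures are not reproduced here), and `lookahead` assumed to be a Cargo feature:

    mod basic {
        pub fn run(msg: &str) {
            println!("basic collator: {msg}");
        }
    }
    mod lookahead {
        pub fn run(msg: &str) {
            println!("lookahead collator: {msg}");
        }
    }

    #[cfg(not(feature = "lookahead"))]
    use self::basic::run as run_aura;
    #[cfg(feature = "lookahead")]
    use self::lookahead::run as run_aura;

    fn main() {
        // Call sites never name the concrete implementation.
        run_aura("start block authoring");
    }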
#[cfg(feature = "unique-runtime")] diff --git a/pallets/app-promotion/src/benchmarking.rs b/pallets/app-promotion/src/benchmarking.rs index bac9c0ce59..60e54b4b00 100644 --- a/pallets/app-promotion/src/benchmarking.rs +++ b/pallets/app-promotion/src/benchmarking.rs @@ -16,16 +16,15 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use crate::Pallet as PromototionPallet; -use frame_support::traits::fungible::Unbalanced; -use sp_runtime::traits::Bounded; - -use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::OnInitialize; +use frame_benchmarking::{account, benchmarks}; +use frame_support::traits::{fungible::Unbalanced, OnInitialize}; use frame_system::RawOrigin; -use pallet_unique::benchmarking::create_nft_collection; use pallet_evm_migration::Pallet as EvmMigrationPallet; +use pallet_unique::benchmarking::create_nft_collection; +use sp_runtime::traits::Bounded; + +use super::*; +use crate::Pallet as PromototionPallet; const SEED: u32 = 0; diff --git a/pallets/app-promotion/src/lib.rs b/pallets/app-promotion/src/lib.rs index d4a633e59f..885f20ebb8 100644 --- a/pallets/app-promotion/src/lib.rs +++ b/pallets/app-promotion/src/lib.rs @@ -53,32 +53,32 @@ mod benchmarking; pub mod types; pub mod weights; -use sp_std::{vec::Vec, vec, iter::Sum, borrow::ToOwned, cell::RefCell}; -use sp_core::H160; -use codec::EncodeLike; -pub use types::*; - -use up_data_structs::CollectionId; - use frame_support::{ - dispatch::{DispatchResult}, + dispatch::DispatchResult, + ensure, + pallet_prelude::*, + storage::Key, traits::{ - Get, - tokens::Balance, fungible::{Inspect, InspectFreeze, Mutate, MutateFreeze}, + tokens::Balance, + Get, }, - ensure, BoundedVec, + weights::Weight, + Blake2_128Concat, BoundedVec, PalletId, Twox64Concat, }; - -use weights::WeightInfo; - +use frame_system::pallet_prelude::*; pub use pallet::*; use pallet_evm::account::CrossAccountId; +use parity_scale_codec::EncodeLike; +use sp_core::H160; use sp_runtime::{ - Perbill, - traits::{BlockNumberProvider, CheckedAdd, CheckedSub, AccountIdConversion, Zero}, - ArithmeticError, DispatchError, + traits::{AccountIdConversion, BlockNumberProvider, CheckedAdd, CheckedSub, Zero}, + ArithmeticError, DispatchError, Perbill, }; +use sp_std::{borrow::ToOwned, cell::RefCell, iter::Sum, vec, vec::Vec}; +pub use types::*; +use up_data_structs::CollectionId; +use weights::WeightInfo; const PENDING_LIMIT_PER_BLOCK: u32 = 3; @@ -87,12 +87,8 @@ type BalanceOf = #[frame_support::pallet] pub mod pallet { + use super::*; - use frame_support::{ - Blake2_128Concat, Twox64Concat, pallet_prelude::*, storage::Key, PalletId, weights::Weight, - }; - use frame_system::pallet_prelude::*; - use sp_runtime::DispatchError; #[pallet::config] pub trait Config: diff --git a/pallets/app-promotion/src/types.rs b/pallets/app-promotion/src/types.rs index 292efe55cd..dfc0528dc9 100644 --- a/pallets/app-promotion/src/types.rs +++ b/pallets/app-promotion/src/types.rs @@ -1,13 +1,12 @@ -use frame_support::{dispatch::DispatchResult}; - +use frame_support::dispatch::DispatchResult; +use frame_system::pallet_prelude::*; use pallet_common::CollectionHandle; - +use pallet_configuration::AppPromomotionConfigurationOverride; +use pallet_evm_contract_helpers::{Config as EvmHelpersConfig, Pallet as EvmHelpersPallet}; +use sp_core::Get; use sp_runtime::{DispatchError, Perbill}; -use up_data_structs::{CollectionId}; use sp_std::borrow::ToOwned; -use pallet_evm_contract_helpers::{Pallet as EvmHelpersPallet, Config as EvmHelpersConfig}; -use 
pallet_configuration::{AppPromomotionConfigurationOverride}; -use sp_core::Get; +use up_data_structs::CollectionId; const MAX_NUMBER_PAYOUTS: u8 = 100; pub(crate) const DEFAULT_NUMBER_PAYOUTS: u8 = 20; diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index efda782c0f..c81a837919 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -1,11 +1,13 @@ use alloc::{vec, vec::Vec}; use core::marker::PhantomData; -use crate::{Config, NativeFungibleHandle, Pallet}; + use frame_support::{fail, weights::Weight}; use pallet_balances::{weights::SubstrateWeight as BalancesWeight, WeightInfo}; use pallet_common::{CommonCollectionOperations, CommonWeightInfo}; use up_data_structs::TokenId; +use crate::{Config, NativeFungibleHandle, Pallet}; + pub struct CommonWeights(PhantomData); // All implementations with `Weight::default` used in methods that return error `UnsupportedOperation`. diff --git a/pallets/balances-adapter/src/erc.rs b/pallets/balances-adapter/src/erc.rs index b58de1e747..90b12327f9 100644 --- a/pallets/balances-adapter/src/erc.rs +++ b/pallets/balances-adapter/src/erc.rs @@ -1,4 +1,3 @@ -use crate::{Config, NativeFungibleHandle, Pallet, SelfWeightOf}; use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*}; use pallet_balances::WeightInfo; use pallet_common::{ @@ -10,8 +9,10 @@ use pallet_evm_coder_substrate::{ execution::{PreDispatch, Result}, frontier_contract, WithRecorder, }; -use pallet_structure::{SelfWeightOf as StructureWeight, weights::WeightInfo as _}; -use sp_core::{U256, Get}; +use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; +use sp_core::{Get, U256}; + +use crate::{Config, NativeFungibleHandle, Pallet, SelfWeightOf}; frontier_contract! { macro_rules! NativeFungibleHandle_result {...} diff --git a/pallets/balances-adapter/src/lib.rs b/pallets/balances-adapter/src/lib.rs index c7fc59ca29..91997c0b79 100644 --- a/pallets/balances-adapter/src/lib.rs +++ b/pallets/balances-adapter/src/lib.rs @@ -4,8 +4,8 @@ extern crate alloc; use core::ops::Deref; use frame_support::sp_runtime::DispatchResult; -use pallet_evm_coder_substrate::{WithRecorder, SubstrateRecorder}; pub use pallet::*; +use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; pub mod common; pub mod erc; @@ -55,16 +55,16 @@ impl Deref for NativeFungibleHandle { } #[frame_support::pallet] pub mod pallet { - use super::*; use alloc::string::String; + use frame_support::{ dispatch::PostDispatchInfo, ensure, - pallet_prelude::{DispatchResultWithPostInfo, Pays}, + pallet_prelude::*, traits::{ - Get, fungible::{Inspect, Mutate}, tokens::Preservation, + Get, }, }; use pallet_balances::WeightInfo; @@ -74,6 +74,8 @@ pub mod pallet { use sp_runtime::DispatchError; use up_data_structs::{budget::Budget, mapping::TokenAddressMapping}; + use super::*; + #[pallet::config] pub trait Config: frame_system::Config diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs index 85fe54c526..37838ee4d2 100644 --- a/pallets/collator-selection/src/benchmarking.rs +++ b/pallets/collator-selection/src/benchmarking.rs @@ -32,18 +32,13 @@ //! 
Benchmarking setup for pallet-collator-selection -use super::*; - -#[allow(unused)] -use crate::{Pallet as CollatorSelection, BalanceOf}; use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_support::{ assert_ok, - codec::Decode, + parity_scale_codec::Decode, traits::{ - EnsureOrigin, fungible::{Inspect, Mutate}, - Get, + EnsureOrigin, Get, }, }; use frame_system::{EventRecord, RawOrigin}; diff --git a/pallets/collator-selection/src/lib.rs b/pallets/collator-selection/src/lib.rs index 027b995e7e..b46ef84445 100644 --- a/pallets/collator-selection/src/lib.rs +++ b/pallets/collator-selection/src/lib.rs @@ -96,26 +96,27 @@ type BalanceOf = <::Currency as Inspect<::AccountId>>::Balance; #[frame_support::pallet] pub mod pallet { - use super::*; - pub use crate::weights::WeightInfo; use core::ops::Div; + use frame_support::{ dispatch::{DispatchClass, DispatchResultWithPostInfo}, - inherent::Vec, pallet_prelude::*, sp_runtime::traits::{AccountIdConversion, CheckedSub, Saturating, Zero}, traits::{ - EnsureOrigin, - fungible::{Balanced, BalancedHold, Inspect, InspectHold, Mutate, MutateHold}, - ValidatorRegistration, + fungible::{Balanced, BalancedHold, Inspect, Mutate, MutateHold}, tokens::{Precision, Preservation}, + EnsureOrigin, ValidatorRegistration, }, BoundedVec, PalletId, }; use frame_system::pallet_prelude::*; use pallet_session::SessionManager; - use sp_runtime::{Perbill, traits::Convert}; + use sp_runtime::{traits::Convert, Perbill}; use sp_staking::SessionIndex; + use sp_std::vec::Vec; + + use super::*; + pub use crate::weights::WeightInfo; /// A convertor from collators id. Since this pallet does not have stash/controller, this is /// just identity. diff --git a/pallets/collator-selection/src/mock.rs b/pallets/collator-selection/src/mock.rs index f273eeedde..5355bf2f0b 100644 --- a/pallets/collator-selection/src/mock.rs +++ b/pallets/collator-selection/src/mock.rs @@ -30,8 +30,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::*; -use crate as collator_selection; use frame_support::{ ord_parameter_types, parameter_types, traits::{FindAuthor, GenesisBuild, ValidatorRegistration}, @@ -46,6 +44,9 @@ use sp_runtime::{ Perbill, RuntimeAppPublic, }; +use super::*; +use crate as collator_selection; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; diff --git a/pallets/collator-selection/src/tests.rs b/pallets/collator-selection/src/tests.rs index 86dc549cf3..b13a44e0f1 100644 --- a/pallets/collator-selection/src/tests.rs +++ b/pallets/collator-selection/src/tests.rs @@ -30,14 +30,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
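The collator-selection benchmarking hunk above switches the `codec::Decode` re-export to `parity_scale_codec::Decode`, and later hunks make the same rename for direct `use codec::…` imports, presumably because the package-rename alias was dropped from the workspace Cargo.toml. A minimal SCALE round-trip with the crate used directly (the `derive` feature is assumed to be enabled; the struct and its fields are illustrative only):

    use parity_scale_codec::{Decode, Encode};

    #[derive(Encode, Decode, PartialEq, Debug)]
    struct LicenseInfo {
        collator: u64,
        deposit: u128,
    }

    fn main() {
        let original = LicenseInfo { collator: 7, deposit: 1_000 };

        // SCALE-encode to bytes and decode back from a byte slice.
        let bytes = original.encode();
        let decoded = LicenseInfo::decode(&mut &bytes[..]).expect("round-trips");
        assert_eq!(original, decoded);
    }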
-use crate::{self as collator_selection, Config}; -use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, traits::{fungible, GenesisBuild, OnInitialize}, }; -use sp_runtime::{traits::BadOrigin, TokenError}; use scale_info::prelude::*; +use sp_runtime::{traits::BadOrigin, TokenError}; + +use crate::{self as collator_selection, mock::*, Config, Error}; fn get_license_and_onboard(account_id: ::AccountId) { assert_ok!(CollatorSelection::get_license(RuntimeOrigin::signed( diff --git a/pallets/common/src/benchmarking.rs b/pallets/common/src/benchmarking.rs index c7837be160..876fb3a095 100644 --- a/pallets/common/src/benchmarking.rs +++ b/pallets/common/src/benchmarking.rs @@ -16,23 +16,25 @@ #![allow(missing_docs)] -use sp_std::vec::Vec; -use crate::{Config, CollectionHandle, Pallet}; -use pallet_evm::account::CrossAccountId; -use frame_benchmarking::{benchmarks, account}; -use up_data_structs::{ - CollectionMode, CreateCollectionData, CollectionId, Property, PropertyKey, PropertyValue, - CollectionPermissions, NestingPermissions, AccessMode, PropertiesPermissionMap, - MAX_COLLECTION_NAME_LENGTH, MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_TOKEN_PREFIX_LENGTH, - MAX_PROPERTIES_PER_ITEM, -}; +use core::convert::TryInto; + +use frame_benchmarking::{account, benchmarks}; use frame_support::{ - traits::{Get, fungible::Balanced, Imbalance, tokens::Precision}, pallet_prelude::ConstU32, + traits::{fungible::Balanced, tokens::Precision, Get, Imbalance}, BoundedVec, }; -use core::convert::TryInto; -use sp_runtime::{DispatchError, traits::Zero}; +use pallet_evm::account::CrossAccountId; +use sp_runtime::{traits::Zero, DispatchError}; +use sp_std::vec::Vec; +use up_data_structs::{ + AccessMode, CollectionId, CollectionMode, CollectionPermissions, CreateCollectionData, + NestingPermissions, PropertiesPermissionMap, Property, PropertyKey, PropertyValue, + MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_COLLECTION_NAME_LENGTH, MAX_PROPERTIES_PER_ITEM, + MAX_TOKEN_PREFIX_LENGTH, +}; + +use crate::{CollectionHandle, Config, Pallet}; const SEED: u32 = 1; diff --git a/pallets/common/src/dispatch.rs b/pallets/common/src/dispatch.rs index fe51ea0563..52009d2be5 100644 --- a/pallets/common/src/dispatch.rs +++ b/pallets/common/src/dispatch.rs @@ -2,13 +2,13 @@ use frame_support::{ dispatch::{ - DispatchResultWithPostInfo, PostDispatchInfo, Weight, DispatchErrorWithPostInfo, - DispatchResult, + DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, Pays, + PostDispatchInfo, }, - dispatch::Pays, traits::Get, }; use sp_runtime::DispatchError; +use sp_weights::Weight; use up_data_structs::{CollectionId, CreateCollectionData}; use crate::{pallet::Config, CommonCollectionOperations}; diff --git a/pallets/common/src/erc.rs b/pallets/common/src/erc.rs index f08b6d903e..4d0f946410 100644 --- a/pallets/common/src/erc.rs +++ b/pallets/common/src/erc.rs @@ -16,15 +16,17 @@ //! This module contains the implementation of pallet methods for evm. 
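The common dispatch.rs hunk above now pulls `Weight` from `sp_weights` instead of `frame_support::dispatch`. A small sketch of that two-dimensional weight type, assuming `sp-weights` is available as a direct dependency:

    use sp_weights::Weight;

    fn main() {
        // Weight tracks both computation time and PoV (proof) size.
        let base = Weight::from_parts(10_000, 256);
        let per_item = Weight::from_parts(1_500, 32);

        // e.g. a call that touches four storage items.
        let total = base.saturating_add(per_item.saturating_mul(4));
        println!("ref_time={} proof_size={}", total.ref_time(), total.proof_size());
    }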
-pub use pallet_evm::{PrecompileOutput, PrecompileResult, PrecompileHandle, account::CrossAccountId}; +pub use pallet_evm::{ + account::CrossAccountId, PrecompileHandle, PrecompileOutput, PrecompileResult, +}; use pallet_evm_coder_substrate::{ abi::AbiType, - solidity_interface, ToLog, + dispatch_to_evm, + execution::{Error, PreDispatch, Result}, + frontier_contract, solidity_interface, types::*, - execution::{Result, Error, PreDispatch}, - frontier_contract, + ToLog, }; -use pallet_evm_coder_substrate::dispatch_to_evm; use sp_std::{vec, vec::Vec}; use up_data_structs::{ CollectionMode, CollectionPermissions, OwnerRestrictedSet, Property, SponsoringRateLimit, @@ -32,7 +34,7 @@ use up_data_structs::{ }; use crate::{ - Pallet, CollectionHandle, Config, CollectionProperties, eth, SelfWeightOf, weights::WeightInfo, + eth, weights::WeightInfo, CollectionHandle, CollectionProperties, Config, Pallet, SelfWeightOf, }; frontier_contract! { @@ -727,11 +729,10 @@ where /// Contains static property keys and values. pub mod static_property { - use pallet_evm_coder_substrate::{ - execution::{Result, Error}, - }; use alloc::format; + use pallet_evm_coder_substrate::execution::{Error, Result}; + const EXPECT_CONVERT_ERROR: &str = "length < limit"; /// Keys. diff --git a/pallets/common/src/eth.rs b/pallets/common/src/eth.rs index ba56ac4314..d6e8fb4c0d 100644 --- a/pallets/common/src/eth.rs +++ b/pallets/common/src/eth.rs @@ -17,15 +17,16 @@ //! The module contains a number of functions for converting and checking ethereum identifiers. use alloc::format; -use sp_std::{vec, vec::Vec}; + use evm_coder::{ - AbiCoder, types::{Address, String}, + AbiCoder, }; -pub use pallet_evm::{Config, account::CrossAccountId}; -use sp_core::{H160, U256}; -use up_data_structs::{CollectionId, CollectionFlags}; +pub use pallet_evm::{account::CrossAccountId, Config}; use pallet_evm_coder_substrate::execution::Error; +use sp_core::{H160, U256}; +use sp_std::{vec, vec::Vec}; +use up_data_structs::{CollectionFlags, CollectionId}; // 0x17c4e6453Cc49AAAaEACA894e6D9683e00000001 - collection 1 // TODO: Unhardcode prefix diff --git a/pallets/common/src/helpers.rs b/pallets/common/src/helpers.rs index 7cad9deb07..86676cf71f 100644 --- a/pallets/common/src/helpers.rs +++ b/pallets/common/src/helpers.rs @@ -3,9 +3,9 @@ //! The module contains helpers. //! 
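The eth.rs hunk above keeps the hardcoded example address 0x17c4e6453Cc49AAAaEACA894e6D9683e00000001 for "collection 1", which reads as a 16-byte prefix followed by the collection id as a big-endian u32. The sketch below reproduces that reading; it is an inference from the comment, not the pallet's own conversion code:

    use sp_core::H160;

    fn main() {
        // Assumed layout: 16-byte prefix, then the collection id (big endian).
        let prefix: [u8; 16] = [
            0x17, 0xc4, 0xe6, 0x45, 0x3c, 0xc4, 0x9a, 0xaa,
            0xae, 0xac, 0xa8, 0x94, 0xe6, 0xd9, 0x68, 0x3e,
        ];
        let collection_id: u32 = 1;

        let mut bytes = [0u8; 20];
        bytes[..16].copy_from_slice(&prefix);
        bytes[16..].copy_from_slice(&collection_id.to_be_bytes());

        let address = H160::from(bytes);
        // The resulting address ends in the collection id.
        assert_eq!(&address.as_bytes()[16..], 1u32.to_be_bytes().as_slice());
        println!("collection 1 address: {address:?}");
    }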
use frame_support::{ + dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, pallet_prelude::DispatchResultWithPostInfo, weights::Weight, - dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, }; /// Add weight for a `DispatchResultWithPostInfo` diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index e28b90e30c..d4d29073c2 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -58,38 +58,38 @@ use core::{ slice::from_ref, marker::PhantomData, }; -use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; -use sp_std::vec::Vec; -use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; + use evm_coder::ToLog; use frame_support::{ - dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, Weight, PostDispatchInfo}, - ensure, + dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, Pays, PostDispatchInfo}, + ensure, fail, traits::{ - Get, fungible::{Balanced, Debt, Inspect}, tokens::{Imbalance, Precision, Preservation}, + Get, }, - dispatch::Pays, - transactional, fail, + transactional, }; +pub use pallet::*; +use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; +use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; +use sp_core::H160; +use sp_runtime::{traits::Zero, ArithmeticError, DispatchError, DispatchResult}; +use sp_std::vec::Vec; +use sp_weights::Weight; use up_data_structs::{ - AccessMode, COLLECTION_NUMBER_LIMIT, Collection, RpcCollection, RpcCollectionFlags, - CollectionId, CreateItemData, MAX_TOKEN_PREFIX_LENGTH, COLLECTION_ADMINS_LIMIT, TokenId, - TokenChild, CollectionStats, MAX_TOKEN_OWNERSHIP, CollectionMode, NFT_SPONSOR_TRANSFER_TIMEOUT, - FUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, REFUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, MAX_SPONSOR_TIMEOUT, - CUSTOM_DATA_LIMIT, CollectionLimits, CreateCollectionData, SponsorshipState, CreateItemExData, - SponsoringRateLimit, budget::Budget, PhantomType, Property, - CollectionProperties as CollectionPropertiesT, TokenProperties, PropertiesPermissionMap, - PropertyKey, PropertyValue, PropertyPermission, PropertiesError, TokenOwnerError, - PropertyKeyPermission, TokenData, TrySetProperty, PropertyScope, CollectionPermissions, + budget::Budget, AccessMode, Collection, CollectionId, CollectionLimits, CollectionMode, + CollectionPermissions, CollectionProperties as CollectionPropertiesT, CollectionStats, + CreateCollectionData, CreateItemData, CreateItemExData, PhantomType, PropertiesError, + PropertiesPermissionMap, Property, PropertyKey, PropertyKeyPermission, PropertyPermission, + PropertyScope, PropertyValue, RpcCollection, RpcCollectionFlags, SponsoringRateLimit, + SponsorshipState, TokenChild, TokenData, TokenId, TokenOwnerError, TokenProperties, + TrySetProperty, COLLECTION_ADMINS_LIMIT, COLLECTION_NUMBER_LIMIT, CUSTOM_DATA_LIMIT, + FUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, MAX_SPONSOR_TIMEOUT, MAX_TOKEN_OWNERSHIP, + MAX_TOKEN_PREFIX_LENGTH, NFT_SPONSOR_TRANSFER_TIMEOUT, REFUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, }; use up_pov_estimate_rpc::PovInfo; -pub use pallet::*; -use sp_core::H160; -use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, traits::Zero}; - #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; pub mod dispatch; @@ -401,13 +401,16 @@ impl CollectionHandle { #[frame_support::pallet] pub mod pallet { - use super::*; use dispatch::CollectionDispatch; - use frame_support::{Blake2_128Concat, pallet_prelude::*, storage::Key, traits::StorageVersion}; - use up_data_structs::{TokenId, mapping::TokenAddressMapping}; + use frame_support::{ 
+ pallet_prelude::*, storage::Key, traits::StorageVersion, Blake2_128Concat, + }; use scale_info::TypeInfo; + use up_data_structs::{mapping::TokenAddressMapping, TokenId}; use weights::WeightInfo; + use super::*; + #[pallet::config] pub trait Config: frame_system::Config + pallet_evm_coder_substrate::Config + pallet_evm::Config + TypeInfo @@ -2720,7 +2723,7 @@ pub fn init_token_properties_delta Weight>( #[cfg(any(feature = "tests", test))] #[allow(missing_docs)] pub mod tests { - use crate::{DispatchResult, DispatchError, LazyValue, Config}; + use crate::{Config, DispatchError, DispatchResult, LazyValue}; const fn to_bool(u: u8) -> bool { u != 0 diff --git a/pallets/configuration/src/benchmarking.rs b/pallets/configuration/src/benchmarking.rs index 8399b00e79..4943feb2a4 100644 --- a/pallets/configuration/src/benchmarking.rs +++ b/pallets/configuration/src/benchmarking.rs @@ -16,10 +16,11 @@ //! Benchmarking setup for pallet-configuration -use super::*; use frame_benchmarking::benchmarks; -use frame_system::{EventRecord, RawOrigin}; use frame_support::assert_ok; +use frame_system::{EventRecord, RawOrigin}; + +use super::*; fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); diff --git a/pallets/configuration/src/lib.rs b/pallets/configuration/src/lib.rs index 7648caa1e8..5a145e68ee 100644 --- a/pallets/configuration/src/lib.rs +++ b/pallets/configuration/src/lib.rs @@ -20,19 +20,17 @@ use core::marker::PhantomData; use frame_support::{ pallet, - weights::{WeightToFeePolynomial, WeightToFeeCoefficients, WeightToFeeCoefficient, Weight}, traits::Get, - Parameter, + weights::{Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial}, }; -use codec::{Decode, Encode, MaxEncodedLen}; +pub use pallet::*; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use smallvec::smallvec; use sp_arithmetic::{ - per_things::{Perbill, PerThing}, + per_things::{PerThing, Perbill}, traits::{BaseArithmetic, Unsigned}, }; -use smallvec::smallvec; - -pub use pallet::*; use sp_core::U256; #[cfg(feature = "runtime-benchmarks")] @@ -41,15 +39,14 @@ pub mod weights; #[pallet] mod pallet { + use core::fmt::Debug; + + use frame_support::{pallet_prelude::*, traits::Get}; + use frame_system::{ensure_root, pallet_prelude::*}; + use parity_scale_codec::Codec; + use sp_arithmetic::{traits::AtLeast32BitUnsigned, FixedPointOperand, Permill}; + use super::*; - use frame_support::{ - traits::Get, - pallet_prelude::*, - log, - dispatch::{Codec, fmt::Debug}, - }; - use frame_system::{pallet_prelude::OriginFor, ensure_root, pallet_prelude::*}; - use sp_arithmetic::{FixedPointOperand, traits::AtLeast32BitUnsigned, Permill}; pub use crate::weights::WeightInfo; #[pallet::config] diff --git a/pallets/evm-coder-substrate/procedural/src/lib.rs b/pallets/evm-coder-substrate/procedural/src/lib.rs index b52395a402..b10f7cfc69 100644 --- a/pallets/evm-coder-substrate/procedural/src/lib.rs +++ b/pallets/evm-coder-substrate/procedural/src/lib.rs @@ -1,12 +1,12 @@ use std::result; -use proc_macro2::{TokenStream, Ident}; +use proc_macro2::{Ident, TokenStream}; use quote::quote; use syn::{ - Error, DeriveInput, Data, Attribute, + parenthesized, parse::{Parse, ParseBuffer}, spanned::Spanned, - Expr, parenthesized, + Attribute, Data, DeriveInput, Error, Expr, }; type Result = result::Result; diff --git a/pallets/evm-coder-substrate/src/execution.rs b/pallets/evm-coder-substrate/src/execution.rs index 0303a30764..2490f2ee42 100644 --- 
a/pallets/evm-coder-substrate/src/execution.rs +++ b/pallets/evm-coder-substrate/src/execution.rs @@ -22,10 +22,9 @@ use alloc::string::{String, ToString}; use std::string::{String, ToString}; use evm_coder::ERC165Call; +pub use evm_coder_substrate_procedural::PreDispatch; use evm_core::{ExitError, ExitFatal}; - pub use frame_support::weights::Weight; -pub use evm_coder_substrate_procedural::PreDispatch; /// Execution error, should be convertible between EVM and Substrate. #[derive(Debug, Clone)] diff --git a/pallets/evm-coder-substrate/src/lib.rs b/pallets/evm-coder-substrate/src/lib.rs index 259a904086..dd61808b70 100644 --- a/pallets/evm-coder-substrate/src/lib.rs +++ b/pallets/evm-coder-substrate/src/lib.rs @@ -22,43 +22,40 @@ extern crate self as pallet_evm_coder_substrate; extern crate alloc; #[cfg(not(feature = "std"))] use alloc::format; -use execution::PreDispatch; -use frame_support::dispatch::Weight; - use core::marker::PhantomData; -use sp_std::{cell::RefCell, vec::Vec}; -use codec::Decode; -use frame_support::pallet_prelude::DispatchError; -use frame_support::traits::PalletInfo; -use frame_support::{ensure, sp_runtime::ModuleError}; -use up_data_structs::budget; +use execution::PreDispatch; +use frame_support::{ + ensure, pallet_prelude::DispatchError, sp_runtime::ModuleError, traits::PalletInfo, +}; use pallet_evm::{ - ExitError, ExitRevert, ExitSucceed, GasWeightMapping, PrecompileFailure, PrecompileOutput, - PrecompileResult, PrecompileHandle, + ExitError, ExitRevert, ExitSucceed, GasWeightMapping, PrecompileFailure, PrecompileHandle, + PrecompileOutput, PrecompileResult, }; +use parity_scale_codec::Decode; use sp_core::{Get, H160}; +use sp_std::{cell::RefCell, vec::Vec}; +use sp_weights::Weight; +use up_data_structs::budget; // #[cfg(feature = "runtime-benchmarks")] // pub mod benchmarking; pub mod execution; -#[doc(hidden)] -pub use spez::spez; - +pub use evm_coder::{abi, solidity_interface, types, Contract, ResultWithPostInfoOf, ToLog}; use evm_coder::{ types::{Msg, Value}, AbiEncode, }; - pub use pallet::*; -pub use evm_coder::{ResultWithPostInfoOf, Contract, abi, solidity_interface, ToLog, types}; +#[doc(hidden)] +pub use spez::spez; #[frame_support::pallet] pub mod pallet { - use super::*; - pub use frame_support::dispatch::DispatchResult; + use super::*; + /// DispatchError is opaque, but we need to somehow extract correct error in case of OutOfGas failure /// So we have this pallet, which defines OutOfGas error, and knews its own id to check if DispatchError /// is thrown because of it diff --git a/pallets/evm-contract-helpers/src/eth.rs b/pallets/evm-contract-helpers/src/eth.rs index e58a7de244..268f1bedac 100644 --- a/pallets/evm-contract-helpers/src/eth.rs +++ b/pallets/evm-contract-helpers/src/eth.rs @@ -18,32 +18,35 @@ extern crate alloc; use core::marker::PhantomData; + use evm_coder::{ - abi::{AbiType, AbiEncode}, + abi::{AbiEncode, AbiType}, generate_stubgen, solidity_interface, types::*, ToLog, }; +use frame_support::traits::Get; +use frame_system::pallet_prelude::*; use pallet_common::eth; use pallet_evm::{ - ExitRevert, OnCreate, OnMethodCall, PrecompileResult, PrecompileFailure, PrecompileHandle, - account::CrossAccountId, + account::CrossAccountId, ExitRevert, OnCreate, OnMethodCall, PrecompileFailure, + PrecompileHandle, PrecompileResult, }; use pallet_evm_coder_substrate::{ - SubstrateRecorder, WithRecorder, dispatch_to_evm, - execution::{Result, PreDispatch}, - frontier_contract, + dispatch_to_evm, + execution::{PreDispatch, Result}, + 
frontier_contract, SubstrateRecorder, WithRecorder, }; use pallet_evm_transaction_payment::CallContext; use sp_core::{H160, U256}; +use sp_std::vec::Vec; use up_data_structs::SponsorshipState; +use up_sponsorship::SponsorshipHandler; + use crate::{ - AllowlistEnabled, Config, Owner, Pallet, SponsorBasket, SponsoringFeeLimit, - SponsoringRateLimit, SponsoringModeT, Sponsoring, + AllowlistEnabled, Config, Owner, Pallet, SponsorBasket, Sponsoring, SponsoringFeeLimit, + SponsoringModeT, SponsoringRateLimit, }; -use frame_support::traits::Get; -use up_sponsorship::SponsorshipHandler; -use sp_std::vec::Vec; frontier_contract! { macro_rules! ContractHelpers_result {...} diff --git a/pallets/evm-contract-helpers/src/lib.rs b/pallets/evm-contract-helpers/src/lib.rs index 768af126ed..59bac8c714 100644 --- a/pallets/evm-contract-helpers/src/lib.rs +++ b/pallets/evm-contract-helpers/src/lib.rs @@ -18,12 +18,12 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use codec::{Decode, Encode, MaxEncodedLen}; +pub use eth::*; use evm_coder::AbiCoder; +use frame_support::storage::bounded_btree_map::BoundedBTreeMap; pub use pallet::*; -pub use eth::*; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use frame_support::storage::bounded_btree_map::BoundedBTreeMap; pub mod eth; /// Maximum number of methods per contract that could have fee limit @@ -31,14 +31,15 @@ pub const MAX_FEE_LIMITED_METHODS: u32 = 5; #[frame_support::pallet] pub mod pallet { - pub use super::*; + use evm_coder::ToLog; use frame_support::{pallet_prelude::*, sp_runtime::DispatchResult}; - use frame_system::{pallet_prelude::OriginFor, ensure_root}; + use frame_system::{ensure_root, pallet_prelude::*}; + use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use sp_core::{H160, U256}; use sp_std::vec::Vec; - use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use up_data_structs::SponsorshipState; - use evm_coder::ToLog; + + pub use super::*; #[pallet::config] pub trait Config: diff --git a/pallets/evm-migration/src/benchmarking.rs b/pallets/evm-migration/src/benchmarking.rs index b27c3bb0ef..49cf5e63e3 100644 --- a/pallets/evm-migration/src/benchmarking.rs +++ b/pallets/evm-migration/src/benchmarking.rs @@ -16,11 +16,12 @@ #![allow(missing_docs)] -use super::{Call, Config, Pallet}; use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use sp_core::{H160, H256}; -use sp_std::{vec::Vec, vec}; +use sp_std::{vec, vec::Vec}; + +use super::{Call, Config, Pallet}; benchmarks! 
{ where_clause { where ::RuntimeEvent: parity_scale_codec::Encode } diff --git a/pallets/evm-migration/src/lib.rs b/pallets/evm-migration/src/lib.rs index a253925870..a049286f63 100644 --- a/pallets/evm-migration/src/lib.rs +++ b/pallets/evm-migration/src/lib.rs @@ -26,15 +26,13 @@ pub mod weights; #[frame_support::pallet] pub mod pallet { - use frame_support::{ - pallet_prelude::{*, DispatchResult}, - traits::IsType, - }; - use frame_system::pallet_prelude::{*, OriginFor}; + use frame_support::{pallet_prelude::*, traits::IsType}; + use frame_system::pallet_prelude::*; + use pallet_evm::{Pallet as PalletEvm, PrecompileHandle}; use sp_core::{H160, H256}; use sp_std::vec::Vec; + use super::weights::WeightInfo; - use pallet_evm::{PrecompileHandle, Pallet as PalletEvm}; #[pallet::config] pub trait Config: frame_system::Config + pallet_evm::Config { diff --git a/pallets/evm-transaction-payment/src/lib.rs b/pallets/evm-transaction-payment/src/lib.rs index 184aa25850..6b79a838c4 100644 --- a/pallets/evm-transaction-payment/src/lib.rs +++ b/pallets/evm-transaction-payment/src/lib.rs @@ -19,20 +19,27 @@ #![deny(missing_docs)] use core::marker::PhantomData; -use fp_evm::WithdrawReason; -use frame_support::traits::IsSubType; + +use fp_evm::{CheckEvmTransaction, FeeCalculator, TransactionValidationError, WithdrawReason}; +use frame_support::{ + storage::with_transaction, + traits::{Currency, Imbalance, IsSubType, OnUnbalanced}, +}; pub use pallet::*; -use pallet_evm::{account::CrossAccountId, EnsureAddressOrigin}; +use pallet_evm::{ + account::CrossAccountId, EnsureAddressOrigin, NegativeImbalanceOf, OnChargeEVMTransaction, + OnCheckEvmTransaction, +}; use sp_core::{H160, U256}; -use sp_runtime::{TransactionOutcome, DispatchError}; +use sp_runtime::{traits::UniqueSaturatedInto, DispatchError, TransactionOutcome}; use up_sponsorship::SponsorshipHandler; #[frame_support::pallet] pub mod pallet { - use super::*; - use sp_std::vec::Vec; + use super::*; + /// Contains call data pub struct CallContext { /// Contract address diff --git a/pallets/foreign-assets/src/benchmarking.rs b/pallets/foreign-assets/src/benchmarking.rs index 9a3219d7cc..93bbba3085 100644 --- a/pallets/foreign-assets/src/benchmarking.rs +++ b/pallets/foreign-assets/src/benchmarking.rs @@ -16,15 +16,14 @@ #![allow(missing_docs)] -use super::{Config, Pallet, Call}; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{account, benchmarks}; +use frame_support::traits::Currency; use frame_system::RawOrigin; +use sp_std::{boxed::Box, vec::Vec}; +use staging_xcm::{opaque::latest::Junction::Parachain, v3::Junctions::X1, VersionedMultiLocation}; + +use super::{Call, Config, Pallet}; use crate::AssetMetadata; -use xcm::opaque::latest::Junction::Parachain; -use xcm::VersionedMultiLocation; -use xcm::v3::Junctions::X1; -use frame_support::traits::Currency; -use sp_std::{vec::Vec, boxed::Box}; fn bounded>>(slice: &[u8]) -> T { T::try_from(slice.to_vec()) diff --git a/pallets/foreign-assets/src/impl_fungibles.rs b/pallets/foreign-assets/src/impl_fungibles.rs index 1b34a538a7..59934a4a87 100644 --- a/pallets/foreign-assets/src/impl_fungibles.rs +++ b/pallets/foreign-assets/src/impl_fungibles.rs @@ -16,17 +16,16 @@ //! Implementations for fungibles trait. 
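The foreign-assets benchmarking hunk above switches from the `xcm` crate to its renamed `staging_xcm` package. A stand-alone use of the same import paths, building a sibling-parachain location and wrapping it in the versioned type (the parachain id is only an example value):

    use staging_xcm::{
        latest::MultiLocation, opaque::latest::Junction::Parachain, v3::Junctions::X1,
        VersionedMultiLocation,
    };

    fn main() {
        // One hop up to the relay chain, then down into parachain 2037.
        let location = MultiLocation::new(1, X1(Parachain(2037)));
        let versioned = VersionedMultiLocation::from(location);
        println!("{versioned:?}");
    }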
-use super::*; -use frame_system::Config as SystemConfig; - use frame_support::traits::tokens::{ - DepositConsequence, WithdrawConsequence, Preservation, Fortitude, Provenance, Precision, + DepositConsequence, Fortitude, Precision, Preservation, Provenance, WithdrawConsequence, }; -use pallet_common::CollectionHandle; +use frame_system::Config as SystemConfig; +use pallet_common::{CollectionHandle, CommonCollectionOperations}; use pallet_fungible::FungibleHandle; -use pallet_common::CommonCollectionOperations; -use up_data_structs::budget::Value; use sp_runtime::traits::{CheckedAdd, CheckedSub}; +use up_data_structs::budget::Value; + +use super::*; impl fungibles::Inspect<::AccountId> for Pallet where diff --git a/pallets/foreign-assets/src/lib.rs b/pallets/foreign-assets/src/lib.rs index 88fc345d31..5a24db092c 100644 --- a/pallets/foreign-assets/src/lib.rs +++ b/pallets/foreign-assets/src/lib.rs @@ -39,29 +39,26 @@ use frame_support::{ ensure, pallet_prelude::*, traits::{fungible, fungibles, Currency, EnsureOrigin}, - RuntimeDebug, }; use frame_system::pallet_prelude::*; -use up_data_structs::CollectionMode; +use pallet_common::erc::CrossAccountId; use pallet_fungible::Pallet as PalletFungible; use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; use sp_runtime::{ traits::{One, Zero}, ArithmeticError, }; use sp_std::{boxed::Box, vec::Vec}; -use up_data_structs::{CollectionId, TokenId, CreateCollectionData}; - +use staging_xcm::{latest::MultiLocation, VersionedMultiLocation}; // NOTE: MultiLocation is used in storages, we will need to do migration if upgrade the // MultiLocation to the XCM v3. -use xcm::opaque::latest::{prelude::XcmError, Weight}; -use xcm::{latest::MultiLocation, VersionedMultiLocation}; -use xcm_executor::{traits::WeightTrader, Assets}; - -use pallet_common::erc::CrossAccountId; - -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; +use staging_xcm::{ + opaque::latest::{prelude::XcmError, Weight}, + v3::XcmContext, +}; +use staging_xcm_executor::{traits::WeightTrader, Assets}; +use up_data_structs::{CollectionId, CollectionMode, CreateCollectionData, TokenId}; // TODO: Move to primitives // Id of native currency. diff --git a/pallets/fungible/src/benchmarking.rs b/pallets/fungible/src/benchmarking.rs index 2ced45af3b..ea03bae74b 100644 --- a/pallets/fungible/src/benchmarking.rs +++ b/pallets/fungible/src/benchmarking.rs @@ -14,14 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use super::*; -use crate::{Pallet, Config, FungibleHandle}; - +use frame_benchmarking::{account, benchmarks}; +use pallet_common::{bench_init, benchmarking::create_collection_raw}; use sp_std::prelude::*; -use pallet_common::benchmarking::create_collection_raw; -use frame_benchmarking::{benchmarks, account}; -use up_data_structs::{CollectionMode, MAX_ITEMS_PER_BATCH, budget::Unlimited}; -use pallet_common::bench_init; +use up_data_structs::{budget::Unlimited, CollectionMode, MAX_ITEMS_PER_BATCH}; + +use super::*; +use crate::{Config, FungibleHandle, Pallet}; const SEED: u32 = 1; diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index e459d52de6..b6bc49c11e 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -16,22 +16,24 @@ use core::marker::PhantomData; -use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight, traits::Get}; -use up_data_structs::{ - TokenId, CollectionId, CreateItemExData, budget::Budget, CreateItemData, TokenOwnerError, +use frame_support::{ + dispatch::DispatchResultWithPostInfo, ensure, fail, traits::Get, weights::Weight, }; use pallet_common::{ - CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, with_weight, - weights::WeightInfo as _, SelfWeightOf as PalletCommonWeightOf, + weights::WeightInfo as _, with_weight, CommonCollectionOperations, CommonWeightInfo, + RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, }; use pallet_structure::Error as StructureError; use sp_runtime::{ArithmeticError, DispatchError}; -use sp_std::{vec::Vec, vec}; -use up_data_structs::{Property, PropertyKey, PropertyValue, PropertyKeyPermission}; +use sp_std::{vec, vec::Vec}; +use up_data_structs::{ + budget::Budget, CollectionId, CreateItemData, CreateItemExData, Property, PropertyKey, + PropertyKeyPermission, PropertyValue, TokenId, TokenOwnerError, +}; use crate::{ - Allowance, TotalSupply, Balance, Config, Error, FungibleHandle, Pallet, SelfWeightOf, - weights::WeightInfo, + weights::WeightInfo, Allowance, Balance, Config, Error, FungibleHandle, Pallet, SelfWeightOf, + TotalSupply, }; pub struct CommonWeights(PhantomData); diff --git a/pallets/fungible/src/erc.rs b/pallets/fungible/src/erc.rs index bce84e214f..5ef76c8a4f 100644 --- a/pallets/fungible/src/erc.rs +++ b/pallets/fungible/src/erc.rs @@ -17,30 +17,31 @@ //! ERC-20 standart support implementation. 
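The fungible common.rs hunk above (like its nonfungible and refungible counterparts) routes dispatch weights through a zero-sized `CommonWeights<T>(PhantomData<T>)` wrapper. A reduced toy version of that pattern, with a made-up `Config` trait and plain `u64` standing in for `Weight`:

    use core::marker::PhantomData;

    trait Config {
        const BASE_WEIGHT: u64;
    }

    // Zero-sized carrier: the weight table is addressed purely as a type,
    // `CommonWeights<T>`; no value of it is ever constructed.
    struct CommonWeights<T>(PhantomData<T>);

    impl<T: Config> CommonWeights<T> {
        fn transfer() -> u64 {
            T::BASE_WEIGHT + 1_000
        }
    }

    struct Runtime;
    impl Config for Runtime {
        const BASE_WEIGHT: u64 = 10_000;
    }

    fn main() {
        println!("transfer weight: {}", CommonWeights::<Runtime>::transfer());
    }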
extern crate alloc; -use core::char::{REPLACEMENT_CHARACTER, decode_utf16}; -use core::convert::TryInto; -use evm_coder::AbiCoder; -use evm_coder::{abi::AbiType, ToLog, generate_stubgen, solidity_interface, types::*}; -use up_data_structs::CollectionMode; +use core::{ + char::{decode_utf16, REPLACEMENT_CHARACTER}, + convert::TryInto, +}; + +use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*, AbiCoder, ToLog}; use pallet_common::{ - CollectionHandle, - erc::{CommonEvmHandler, PrecompileResult, CollectionCall}, + erc::{CollectionCall, CommonEvmHandler, PrecompileResult}, eth::CrossAddress, - CommonWeightInfo as _, + CollectionHandle, CommonWeightInfo as _, }; -use sp_std::vec::Vec; use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ call, dispatch_to_evm, execution::{PreDispatch, Result}, frontier_contract, }; -use pallet_structure::{SelfWeightOf as StructureWeight, weights::WeightInfo as _}; -use sp_core::{U256, Get}; +use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; +use sp_core::{Get, U256}; +use sp_std::vec::Vec; +use up_data_structs::CollectionMode; use crate::{ - Allowance, Balance, Config, FungibleHandle, Pallet, TotalSupply, SelfWeightOf, - weights::WeightInfo, common::CommonWeights, + common::CommonWeights, weights::WeightInfo, Allowance, Balance, Config, FungibleHandle, Pallet, + SelfWeightOf, TotalSupply, }; frontier_contract! { diff --git a/pallets/fungible/src/lib.rs b/pallets/fungible/src/lib.rs index 96371e7179..9c8291a8d2 100644 --- a/pallets/fungible/src/lib.rs +++ b/pallets/fungible/src/lib.rs @@ -79,30 +79,26 @@ #![cfg_attr(not(feature = "std"), no_std)] use core::ops::Deref; + use evm_coder::ToLog; -use frame_support::{ - ensure, - pallet_prelude::{DispatchResultWithPostInfo, Pays}, - dispatch::PostDispatchInfo, -}; -use pallet_evm::account::CrossAccountId; -use up_data_structs::{ - AccessMode, CollectionId, TokenId, CreateCollectionData, mapping::TokenAddressMapping, - budget::Budget, PropertyKey, Property, -}; +use frame_support::{dispatch::PostDispatchInfo, ensure, pallet_prelude::*}; +pub use pallet::*; use pallet_common::{ - Error as CommonError, Event as CommonEvent, Pallet as PalletCommon, - eth::collection_id_to_address, SelfWeightOf as PalletCommonWeightOf, - weights::WeightInfo as CommonWeightInfo, helpers::add_weight_to_post_info, + eth::collection_id_to_address, helpers::add_weight_to_post_info, + weights::WeightInfo as CommonWeightInfo, Error as CommonError, Event as CommonEvent, + Pallet as PalletCommon, SelfWeightOf as PalletCommonWeightOf, }; -use pallet_evm::Pallet as PalletEvm; -use pallet_structure::Pallet as PalletStructure; +use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_evm_coder_substrate::WithRecorder; +use pallet_structure::Pallet as PalletStructure; use sp_core::H160; use sp_runtime::{ArithmeticError, DispatchError, DispatchResult}; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; +use up_data_structs::{ + budget::Budget, mapping::TokenAddressMapping, AccessMode, CollectionId, CreateCollectionData, + Property, PropertyKey, TokenId, +}; use weights::WeightInfo; -pub use pallet::*; use crate::erc::ERC20Events; #[cfg(feature = "runtime-benchmarks")] @@ -116,8 +112,11 @@ pub(crate) type SelfWeightOf = ::WeightInfo; #[frame_support::pallet] pub mod pallet { - use frame_support::{Blake2_128, Blake2_128Concat, Twox64Concat, pallet_prelude::*, storage::Key}; + use frame_support::{ + pallet_prelude::*, storage::Key, 
Blake2_128, Blake2_128Concat, Twox64Concat, + }; use up_data_structs::CollectionId; + use super::weights::WeightInfo; #[pallet::error] diff --git a/pallets/gov-origins/src/lib.rs b/pallets/gov-origins/src/lib.rs index ecd0d2548c..0e4f3b956c 100644 --- a/pallets/gov-origins/src/lib.rs +++ b/pallets/gov-origins/src/lib.rs @@ -17,7 +17,6 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::pallet_prelude::*; - pub use pallet::*; #[frame_support::pallet] diff --git a/pallets/identity/src/benchmarking.rs b/pallets/identity/src/benchmarking.rs index 16d416229e..62091e743f 100644 --- a/pallets/identity/src/benchmarking.rs +++ b/pallets/identity/src/benchmarking.rs @@ -37,17 +37,17 @@ #![cfg(feature = "runtime-benchmarks")] #![allow(clippy::no_effect)] -use super::*; - -use crate::Pallet as Identity; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::{ - ensure, assert_ok, + assert_ok, ensure, traits::{EnsureOrigin, Get}, }; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; +use super::*; +use crate::Pallet as Identity; + const SEED: u32 = 0; fn assert_last_event(generic_event: ::RuntimeEvent) { diff --git a/pallets/identity/src/lib.rs b/pallets/identity/src/lib.rs index 163b98a9e9..3bd235ba97 100644 --- a/pallets/identity/src/lib.rs +++ b/pallets/identity/src/lib.rs @@ -95,21 +95,18 @@ mod tests; mod types; pub mod weights; -use frame_support::{ - traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}, -}; +use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; +pub use pallet::*; use sp_runtime::{ - BoundedVec, traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}, + BoundedVec, }; use sp_std::prelude::*; -pub use weights::WeightInfo; - -pub use pallet::*; pub use types::{ Data, IdentityField, IdentityFields, IdentityInfo, Judgement, RegistrarIndex, RegistrarInfo, Registration, }; +pub use weights::WeightInfo; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -132,10 +129,11 @@ type SubAccountsByAccountId = ( #[frame_support::pallet] pub mod pallet { - use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use super::*; + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. diff --git a/pallets/identity/src/tests.rs b/pallets/identity/src/tests.rs index aae4acd15f..859a1f3095 100644 --- a/pallets/identity/src/tests.rs +++ b/pallets/identity/src/tests.rs @@ -34,22 +34,23 @@ // Tests for Identity Pallet -use super::*; -use crate as pallet_identity; - -use codec::{Decode, Encode}; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, EitherOfDiverse}, BoundedVec, }; use frame_system::{EnsureRoot, EnsureSignedBy}; +use parity_scale_codec::{Decode, Encode}; use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + BuildStorage, }; +use super::*; +use crate as pallet_identity; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; diff --git a/pallets/identity/src/types.rs b/pallets/identity/src/types.rs index 83e139df5b..92ee3bdcee 100644 --- a/pallets/identity/src/types.rs +++ b/pallets/identity/src/types.rs @@ -32,13 +32,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
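The identity hunks above import `BoundedVec` (from frame_support in the tests, via the sp_runtime re-export in the pallet) for length-capped data. A minimal example of how such a bound is enforced at the type level; the `ConstU32<32>` bound below is arbitrary:

    use frame_support::{traits::ConstU32, BoundedVec};

    fn main() {
        // A byte string that can never exceed 32 bytes.
        let mut name: BoundedVec<u8, ConstU32<32>> =
            BoundedVec::try_from(b"unique".to_vec()).expect("6 <= 32");
        name.try_push(b'!').expect("still under the bound");

        // Oversized input is rejected instead of being silently truncated.
        assert!(BoundedVec::<u8, ConstU32<32>>::try_from(vec![0u8; 64]).is_err());
    }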
-use super::*; -use codec::{Decode, Encode, MaxEncodedLen}; use enumflags2::{bitflags, BitFlags}; use frame_support::{ traits::{ConstU32, Get}, BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::{ build::{Fields, Variants}, meta_type, Path, Type, TypeInfo, TypeParameter, @@ -46,6 +45,8 @@ use scale_info::{ use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*}; +use super::*; + /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. /// diff --git a/pallets/inflation/src/benchmarking.rs b/pallets/inflation/src/benchmarking.rs index f80e8bd440..430a9c3606 100644 --- a/pallets/inflation/src/benchmarking.rs +++ b/pallets/inflation/src/benchmarking.rs @@ -16,12 +16,12 @@ #![cfg(feature = "runtime-benchmarks")] +use frame_benchmarking::benchmarks; +use frame_support::traits::OnInitialize; + use super::*; use crate::Pallet as Inflation; -use frame_benchmarking::{benchmarks}; -use frame_support::traits::OnInitialize; - benchmarks! { on_initialize { diff --git a/pallets/inflation/src/lib.rs b/pallets/inflation/src/lib.rs index c690b2d4ae..e49af7e0f4 100644 --- a/pallets/inflation/src/lib.rs +++ b/pallets/inflation/src/lib.rs @@ -37,17 +37,14 @@ mod benchmarking; #[cfg(test)] mod tests; -use frame_support::{ - dispatch::{DispatchResult}, - traits::{ - fungible::{Balanced, Inspect, Mutate}, - Get, - tokens::Precision, - }, +use frame_support::traits::{ + fungible::{Balanced, Inspect, Mutate}, + tokens::Precision, + Get, }; +use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; -use sp_runtime::{Perbill, traits::BlockNumberProvider}; - +use sp_runtime::{traits::BlockNumberProvider, Perbill}; use sp_std::convert::TryInto; type BalanceOf = @@ -61,10 +58,11 @@ pub const END_INFLATION_PERCENT: u32 = 4; #[frame_support::pallet] pub mod pallet { - use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use super::*; + #[pallet::config] pub trait Config: frame_system::Config { type Currency: Balanced diff --git a/pallets/inflation/src/tests.rs b/pallets/inflation/src/tests.rs index 0e0a91767d..59821b9a32 100644 --- a/pallets/inflation/src/tests.rs +++ b/pallets/inflation/src/tests.rs @@ -16,14 +16,12 @@ #![cfg(test)] #![allow(clippy::from_over_into)] -use crate as pallet_inflation; - use frame_support::{ assert_ok, parameter_types, traits::{ fungible::{Balanced, Inspect}, - OnInitialize, Everything, ConstU32, tokens::Precision, + ConstU32, Everything, OnInitialize, }, weights::Weight, }; @@ -31,9 +29,11 @@ use frame_system::RawOrigin; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, BlockNumberProvider, IdentityLookup}, - testing::Header, + BuildStorage, }; +use crate as pallet_inflation; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; diff --git a/pallets/maintenance/src/benchmarking.rs b/pallets/maintenance/src/benchmarking.rs index 21d3bf7d31..2f26928355 100644 --- a/pallets/maintenance/src/benchmarking.rs +++ b/pallets/maintenance/src/benchmarking.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
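The inflation hunk above keeps `Perbill` arithmetic and an `END_INFLATION_PERCENT` of 4. As a reminder of how `Perbill` applies a percentage to a balance in pure integer math (the issuance figure is arbitrary and the calculation is only an illustration, not the pallet's actual formula):

    use sp_runtime::Perbill;

    fn main() {
        // Perbill stores a fraction as parts per billion.
        let inflation = Perbill::from_percent(4);
        let total_issuance: u128 = 1_000_000_000_000;

        // 4% of the issuance, computed without floating point.
        assert_eq!(inflation * total_issuance, 40_000_000_000);
    }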
-use super::*; -use crate::{Pallet as Maintenance, Config}; - -use codec::Encode; use frame_benchmarking::benchmarks; -use frame_system::RawOrigin; use frame_support::{ensure, pallet_prelude::Weight, traits::StorePreimage}; +use frame_system::RawOrigin; +use parity_scale_codec::Encode; + +use super::*; +use crate::{Config, Pallet as Maintenance}; benchmarks! { enable { diff --git a/pallets/maintenance/src/lib.rs b/pallets/maintenance/src/lib.rs index f34caabe80..51cb9e6212 100644 --- a/pallets/maintenance/src/lib.rs +++ b/pallets/maintenance/src/lib.rs @@ -26,10 +26,14 @@ pub mod weights; #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::*, pallet_prelude::*}; - use frame_support::traits::{QueryPreimage, StorePreimage, EnsureOrigin}; + use frame_support::{ + dispatch::*, + pallet_prelude::*, + traits::{EnsureOrigin, QueryPreimage, StorePreimage}, + }; use frame_system::pallet_prelude::*; use sp_core::H256; + use sp_runtime::traits::Dispatchable; use crate::weights::WeightInfo; @@ -111,7 +115,7 @@ pub mod pallet { hash: H256, weight_bound: Weight, ) -> DispatchResultWithPostInfo { - use codec::Decode; + use parity_scale_codec::Decode; T::PreimageOrigin::ensure_origin(origin.clone())?; diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 65367feead..63886f5a39 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -14,10 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use super::*; -use crate::{Pallet, Config, NonfungibleHandle}; - -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{account, benchmarks}; use pallet_common::{ bench_init, benchmarking::{ @@ -27,10 +24,13 @@ use pallet_common::{ }; use sp_std::prelude::*; use up_data_structs::{ - CollectionMode, MAX_ITEMS_PER_BATCH, MAX_PROPERTIES_PER_ITEM, budget::Unlimited, - PropertyPermission, + budget::Unlimited, CollectionMode, PropertyPermission, MAX_ITEMS_PER_BATCH, + MAX_PROPERTIES_PER_ITEM, }; +use super::*; +use crate::{Config, NonfungibleHandle, Pallet}; + const SEED: u32 = 1; fn create_max_item_data(owner: T::CrossAccountId) -> CreateItemData { diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index 4854cdf9bd..5bdf7f5606 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -17,21 +17,21 @@ use core::marker::PhantomData; use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight}; -use up_data_structs::{ - TokenId, CreateItemExData, CollectionId, budget::Budget, Property, PropertyKey, - PropertyKeyPermission, PropertyValue, TokenOwnerError, -}; use pallet_common::{ - CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, with_weight, - weights::WeightInfo as _, SelfWeightOf as PalletCommonWeightOf, init_token_properties_delta, + init_token_properties_delta, weights::WeightInfo as _, with_weight, CommonCollectionOperations, + CommonWeightInfo, RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, }; use pallet_structure::Pallet as PalletStructure; use sp_runtime::DispatchError; -use sp_std::{vec::Vec, vec}; +use sp_std::{vec, vec::Vec}; +use up_data_structs::{ + budget::Budget, CollectionId, CreateItemExData, Property, PropertyKey, PropertyKeyPermission, + PropertyValue, TokenId, TokenOwnerError, +}; use crate::{ - AccountBalance, Allowance, Config, CreateItemData, Error, NonfungibleHandle, Owned, Pallet, 
- SelfWeightOf, TokenData, weights::WeightInfo, TokensMinted, TokenProperties, + weights::WeightInfo, AccountBalance, Allowance, Config, CreateItemData, Error, + NonfungibleHandle, Owned, Pallet, SelfWeightOf, TokenData, TokenProperties, TokensMinted, }; pub struct CommonWeights(PhantomData); diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index 5656474751..0dddd97bfd 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -23,34 +23,34 @@ extern crate alloc; use alloc::string::ToString; use core::{ - char::{REPLACEMENT_CHARACTER, decode_utf16}, + char::{decode_utf16, REPLACEMENT_CHARACTER}, convert::TryInto, }; -use evm_coder::{abi::AbiType, AbiCoder, ToLog, generate_stubgen, solidity_interface, types::*}; + +use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*, AbiCoder, ToLog}; use frame_support::BoundedVec; -use up_data_structs::{ - TokenId, PropertyPermission, PropertyKeyPermission, Property, CollectionId, PropertyKey, - CollectionPropertiesVec, -}; -use pallet_evm_coder_substrate::{ - dispatch_to_evm, frontier_contract, - execution::{Result, PreDispatch, Error}, -}; -use sp_std::{vec::Vec, vec}; use pallet_common::{ - CollectionHandle, CollectionPropertyPermissions, CommonCollectionOperations, - erc::{CommonEvmHandler, PrecompileResult, CollectionCall, static_property::key}, + erc::{static_property::key, CollectionCall, CommonEvmHandler, PrecompileResult}, eth::{self, TokenUri}, - CommonWeightInfo, + CollectionHandle, CollectionPropertyPermissions, CommonCollectionOperations, CommonWeightInfo, }; use pallet_evm::{account::CrossAccountId, PrecompileHandle}; -use pallet_evm_coder_substrate::call; -use pallet_structure::{SelfWeightOf as StructureWeight, weights::WeightInfo as _}; -use sp_core::{U256, Get}; +use pallet_evm_coder_substrate::{ + call, dispatch_to_evm, + execution::{Error, PreDispatch, Result}, + frontier_contract, +}; +use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; +use sp_core::{Get, U256}; +use sp_std::{vec, vec::Vec}; +use up_data_structs::{ + CollectionId, CollectionPropertiesVec, Property, PropertyKey, PropertyKeyPermission, + PropertyPermission, TokenId, +}; use crate::{ - AccountBalance, Config, CreateItemData, NonfungibleHandle, Pallet, TokenData, TokensMinted, - TokenProperties, SelfWeightOf, weights::WeightInfo, common::CommonWeights, + common::CommonWeights, weights::WeightInfo, AccountBalance, Config, CreateItemData, + NonfungibleHandle, Pallet, SelfWeightOf, TokenData, TokenProperties, TokensMinted, }; /// Nft events. diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 630f4782cf..a76ccf8c5f 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -14,12 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
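The nonfungible erc.rs hunk above (and the fungible/refungible ones) imports `core::char::{decode_utf16, REPLACEMENT_CHARACTER}`, which suggests names reach the EVM layer as UTF-16 code units and are decoded lossily. A stand-alone sketch of that decoding:

    use core::char::{decode_utf16, REPLACEMENT_CHARACTER};

    fn main() {
        // A valid UTF-16 name followed by one unpaired surrogate (0xD800).
        let units: Vec<u16> = "Unique ★".encode_utf16().chain([0xD800]).collect();

        // Replace anything undecodable with U+FFFD instead of failing.
        let name: String = decode_utf16(units)
            .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER))
            .collect();
        assert_eq!(name, "Unique ★\u{FFFD}");
    }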
-use super::*; -use crate::{Pallet, Config, RefungibleHandle}; +use core::{convert::TryInto, iter::IntoIterator}; -use core::convert::TryInto; -use core::iter::IntoIterator; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{account, benchmarks}; use pallet_common::{ bench_init, benchmarking::{ @@ -28,10 +25,13 @@ use pallet_common::{ }; use sp_std::prelude::*; use up_data_structs::{ - CollectionMode, MAX_ITEMS_PER_BATCH, MAX_PROPERTIES_PER_ITEM, budget::Unlimited, - PropertyPermission, + budget::Unlimited, CollectionMode, PropertyPermission, MAX_ITEMS_PER_BATCH, + MAX_PROPERTIES_PER_ITEM, }; +use super::*; +use crate::{Config, Pallet, RefungibleHandle}; + const SEED: u32 = 1; fn create_max_item_data( diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 0d64f8deb8..cfb3af2ffb 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -16,24 +16,25 @@ use core::marker::PhantomData; -use sp_std::collections::btree_map::BTreeMap; -use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight, traits::Get}; -use up_data_structs::{ - CollectionId, TokenId, CreateItemExData, budget::Budget, Property, PropertyKey, PropertyValue, - PropertyKeyPermission, CreateRefungibleExMultipleOwners, CreateRefungibleExSingleOwner, - TokenOwnerError, +use frame_support::{ + dispatch::DispatchResultWithPostInfo, ensure, fail, traits::Get, weights::Weight, }; use pallet_common::{ - CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, with_weight, - weights::WeightInfo as _, init_token_properties_delta, + init_token_properties_delta, weights::WeightInfo as _, with_weight, CommonCollectionOperations, + CommonWeightInfo, RefungibleExtensions, +}; +use pallet_structure::{Error as StructureError, Pallet as PalletStructure}; +use sp_runtime::DispatchError; +use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; +use up_data_structs::{ + budget::Budget, CollectionId, CreateItemExData, CreateRefungibleExMultipleOwners, + CreateRefungibleExSingleOwner, Property, PropertyKey, PropertyKeyPermission, PropertyValue, + TokenId, TokenOwnerError, }; -use pallet_structure::{Pallet as PalletStructure, Error as StructureError}; -use sp_runtime::{DispatchError}; -use sp_std::{vec::Vec, vec}; use crate::{ - AccountBalance, Allowance, Balance, Config, Error, Owned, Pallet, RefungibleHandle, - SelfWeightOf, weights::WeightInfo, TokensMinted, TotalSupply, CreateItemData, TokenProperties, + weights::WeightInfo, AccountBalance, Allowance, Balance, Config, CreateItemData, Error, Owned, + Pallet, RefungibleHandle, SelfWeightOf, TokenProperties, TokensMinted, TotalSupply, }; macro_rules! 
max_weight_of { diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 4323ead058..89d0d87c27 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -23,34 +23,35 @@ extern crate alloc; use alloc::string::ToString; use core::{ - char::{REPLACEMENT_CHARACTER, decode_utf16}, + char::{decode_utf16, REPLACEMENT_CHARACTER}, convert::TryInto, }; -use evm_coder::{abi::AbiType, AbiCoder, ToLog, generate_stubgen, solidity_interface, types::*}; + +use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*, AbiCoder, ToLog}; use frame_support::{BoundedBTreeMap, BoundedVec}; use pallet_common::{ + erc::{static_property::key, CollectionCall, CommonEvmHandler}, + eth::{self, TokenUri}, CollectionHandle, CollectionPropertyPermissions, CommonCollectionOperations, Error as CommonError, - erc::{CommonEvmHandler, CollectionCall, static_property::key}, - eth::{self, TokenUri}, }; use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ call, dispatch_to_evm, - execution::{PreDispatch, Result, Error}, + execution::{Error, PreDispatch, Result}, frontier_contract, }; -use pallet_structure::{SelfWeightOf as StructureWeight, weights::WeightInfo as _}; -use sp_core::{H160, U256, Get}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec, vec}; +use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; +use sp_core::{Get, H160, U256}; +use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; use up_data_structs::{ - CollectionId, CollectionPropertiesVec, mapping::TokenAddressMapping, Property, PropertyKey, + mapping::TokenAddressMapping, CollectionId, CollectionPropertiesVec, Property, PropertyKey, PropertyKeyPermission, PropertyPermission, TokenId, TokenOwnerError, }; use crate::{ - AccountBalance, Balance, Config, CreateItemData, Pallet, RefungibleHandle, TokenProperties, - TokensMinted, TotalSupply, SelfWeightOf, weights::WeightInfo, + weights::WeightInfo, AccountBalance, Balance, Config, CreateItemData, Pallet, RefungibleHandle, + SelfWeightOf, TokenProperties, TokensMinted, TotalSupply, }; frontier_contract! { diff --git a/pallets/refungible/src/erc_token.rs b/pallets/refungible/src/erc_token.rs index 35254755a1..7269552ea3 100644 --- a/pallets/refungible/src/erc_token.rs +++ b/pallets/refungible/src/erc_token.rs @@ -20,11 +20,12 @@ //! Method implementations are mostly doing parameter conversion and calling Nonfungible Pallet methods. 
use core::{ - char::{REPLACEMENT_CHARACTER, decode_utf16}, + char::{decode_utf16, REPLACEMENT_CHARACTER}, convert::TryInto, ops::Deref, }; -use evm_coder::{abi::AbiType, ToLog, generate_stubgen, solidity_interface, types::*}; + +use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*, ToLog}; use pallet_common::{ erc::{CommonEvmHandler, PrecompileResult}, eth::{collection_id_to_address, CrossAddress}, @@ -32,17 +33,18 @@ use pallet_common::{ }; use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ - call, dispatch_to_evm, WithRecorder, frontier_contract, - execution::{Result, PreDispatch}, + call, dispatch_to_evm, + execution::{PreDispatch, Result}, + frontier_contract, WithRecorder, }; -use pallet_structure::{SelfWeightOf as StructureWeight, weights::WeightInfo as _}; -use sp_std::vec::Vec; +use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; use sp_core::U256; +use sp_std::vec::Vec; use up_data_structs::TokenId; use crate::{ - Allowance, Balance, Config, Pallet, RefungibleHandle, TotalSupply, common::CommonWeights, - SelfWeightOf, weights::WeightInfo, + common::CommonWeights, weights::WeightInfo, Allowance, Balance, Config, Pallet, + RefungibleHandle, SelfWeightOf, TotalSupply, }; /// Refungible token handle contains information about token's collection and id diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index e9f4f9575e..766859a588 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -87,30 +87,29 @@ #![cfg_attr(not(feature = "std"), no_std)] -use crate::erc_token::ERC20Events; -use crate::erc::ERC721Events; +use core::{cmp::Ordering, ops::Deref}; -use core::{ops::Deref, cmp::Ordering}; use evm_coder::ToLog; use frame_support::{ensure, storage::with_transaction, transactional}; -use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; -use pallet_evm_coder_substrate::WithRecorder; +pub use pallet::*; use pallet_common::{ - Error as CommonError, eth::collection_id_to_address, Event as CommonEvent, + eth::collection_id_to_address, Error as CommonError, Event as CommonEvent, Pallet as PalletCommon, }; +use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; +use pallet_evm_coder_substrate::WithRecorder; use pallet_structure::Pallet as PalletStructure; use sp_core::{Get, H160}; use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, TransactionOutcome}; -use sp_std::{vec::Vec, vec, collections::btree_map::BTreeMap}; +use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; use up_data_structs::{ - AccessMode, budget::Budget, CollectionId, CreateCollectionData, mapping::TokenAddressMapping, - MAX_REFUNGIBLE_PIECES, Property, PropertyKey, PropertyKeyPermission, PropertyScope, - PropertyValue, TokenId, PropertiesPermissionMap, CreateRefungibleExMultipleOwners, - TokenOwnerError, TokenProperties as TokenPropertiesT, + budget::Budget, mapping::TokenAddressMapping, AccessMode, CollectionId, CreateCollectionData, + CreateRefungibleExMultipleOwners, PropertiesPermissionMap, Property, PropertyKey, + PropertyKeyPermission, PropertyScope, PropertyValue, TokenId, TokenOwnerError, + TokenProperties as TokenPropertiesT, TrySetProperty, MAX_REFUNGIBLE_PIECES, }; -pub use pallet::*; +use crate::{erc::ERC721Events, erc_token::ERC20Events}; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; pub mod common; @@ -124,13 +123,13 @@ pub(crate) type SelfWeightOf = ::WeightInfo; #[frame_support::pallet] pub mod pallet { - use 
super::*; use frame_support::{ - Blake2_128, Blake2_128Concat, Twox64Concat, pallet_prelude::*, storage::Key, - traits::StorageVersion, + pallet_prelude::*, storage::Key, traits::StorageVersion, Blake2_128, Blake2_128Concat, + Twox64Concat, }; use up_data_structs::{CollectionId, TokenId}; - use super::weights::WeightInfo; + + use super::{weights::WeightInfo, *}; #[pallet::error] pub enum Error { diff --git a/pallets/structure/src/benchmarking.rs b/pallets/structure/src/benchmarking.rs index 178b7b9d2e..dfc97269cf 100644 --- a/pallets/structure/src/benchmarking.rs +++ b/pallets/structure/src/benchmarking.rs @@ -14,15 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use super::*; - -use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::{fungible::Balanced, Get, tokens::Precision}; -use up_data_structs::{ - CreateCollectionData, CollectionMode, CreateItemData, CreateNftData, budget::Unlimited, -}; +use frame_benchmarking::{account, benchmarks}; +use frame_support::traits::{fungible::Balanced, tokens::Precision, Get}; use pallet_common::Config as CommonConfig; use pallet_evm::account::CrossAccountId; +use up_data_structs::{ + budget::Unlimited, CollectionMode, CreateCollectionData, CreateItemData, CreateNftData, +}; + +use super::*; const SEED: u32 = 1; diff --git a/pallets/structure/src/lib.rs b/pallets/structure/src/lib.rs index 3374ccdd65..2014cc44b7 100644 --- a/pallets/structure/src/lib.rs +++ b/pallets/structure/src/lib.rs @@ -53,29 +53,31 @@ #![cfg_attr(not(feature = "std"), no_std)] -use pallet_common::CommonCollectionOperations; -use pallet_common::{erc::CrossAccountId, eth::is_collection}; +use frame_support::{ + dispatch::{DispatchResult, DispatchResultWithPostInfo}, + fail, + pallet_prelude::*, +}; +use pallet_common::{ + dispatch::CollectionDispatch, erc::CrossAccountId, eth::is_collection, + CommonCollectionOperations, +}; use sp_std::collections::btree_set::BTreeSet; - -use frame_support::dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo}; -use frame_support::fail; -pub use pallet::*; -use pallet_common::{dispatch::CollectionDispatch}; use up_data_structs::{ - CollectionId, TokenId, mapping::TokenAddressMapping, budget::Budget, TokenOwnerError, + budget::Budget, mapping::TokenAddressMapping, CollectionId, TokenId, TokenOwnerError, }; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; pub mod weights; +pub use pallet::*; + pub type SelfWeightOf = ::WeightInfo; #[frame_support::pallet] pub mod pallet { - use frame_support::Parameter; - use frame_support::dispatch::{GetDispatchInfo, UnfilteredDispatchable}; - use frame_support::pallet_prelude::*; + use frame_support::{dispatch::GetDispatchInfo, traits::UnfilteredDispatchable, Parameter}; use super::*; diff --git a/pallets/unique/src/benchmarking.rs b/pallets/unique/src/benchmarking.rs index 1d182a1781..265be355fe 100644 --- a/pallets/unique/src/benchmarking.rs +++ b/pallets/unique/src/benchmarking.rs @@ -16,21 +16,22 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use crate::Pallet; +use frame_benchmarking::{account, benchmarks}; +use frame_support::traits::{fungible::Balanced, tokens::Precision, Get}; use frame_system::RawOrigin; -use frame_support::traits::{fungible::Balanced, Get, tokens::Precision}; -use frame_benchmarking::{benchmarks, account}; -use sp_runtime::DispatchError; use pallet_common::{ - Config as CommonConfig, benchmarking::{create_data, create_u16_data}, + erc::CrossAccountId, + 
Config as CommonConfig, }; +use sp_runtime::DispatchError; use up_data_structs::{ - CollectionId, CollectionMode, MAX_COLLECTION_NAME_LENGTH, MAX_TOKEN_PREFIX_LENGTH, - MAX_COLLECTION_DESCRIPTION_LENGTH, CollectionLimits, + CollectionId, CollectionLimits, CollectionMode, MAX_COLLECTION_DESCRIPTION_LENGTH, + MAX_COLLECTION_NAME_LENGTH, MAX_TOKEN_PREFIX_LENGTH, }; -use pallet_common::erc::CrossAccountId; + +use super::*; +use crate::Pallet; const SEED: u32 = 1; diff --git a/pallets/unique/src/eth/mod.rs b/pallets/unique/src/eth/mod.rs index cf43a10225..3a41e667b1 100644 --- a/pallets/unique/src/eth/mod.rs +++ b/pallets/unique/src/eth/mod.rs @@ -16,23 +16,25 @@ //! Implementation of CollectionHelpers contract. //! +use alloc::{collections::BTreeSet, format}; use core::marker::PhantomData; + use ethereum as _; use evm_coder::{abi::AbiType, generate_stubgen, solidity_interface, types::*}; -use frame_support::{BoundedVec, traits::Get}; +use frame_support::{traits::Get, BoundedVec}; use pallet_common::{ - CollectionById, dispatch::CollectionDispatch, - erc::{CollectionHelpersEvents, static_property::key}, - eth::{self, map_eth_to_id, collection_id_to_address}, - Pallet as PalletCommon, CollectionHandle, + erc::{static_property::key, CollectionHelpersEvents}, + eth::{self, collection_id_to_address, map_eth_to_id}, + CollectionById, CollectionHandle, Pallet as PalletCommon, }; use pallet_evm::{account::CrossAccountId, OnMethodCall, PrecompileHandle, PrecompileResult}; use pallet_evm_coder_substrate::{ - dispatch_to_evm, SubstrateRecorder, WithRecorder, - execution::{PreDispatch, Result, Error}, - frontier_contract, + dispatch_to_evm, + execution::{Error, PreDispatch, Result}, + frontier_contract, SubstrateRecorder, WithRecorder, }; +use sp_std::vec::Vec; use up_data_structs::{ CollectionDescription, CollectionMode, CollectionName, CollectionPermissions, CollectionTokenPrefix, CreateCollectionData, NestingPermissions, @@ -40,9 +42,6 @@ use up_data_structs::{ use crate::{weights::WeightInfo, Config, Pallet, SelfWeightOf}; -use alloc::{format, collections::BTreeSet}; -use sp_std::vec::Vec; - frontier_contract! { macro_rules! 
EvmCollectionHelpers_result {...} impl Contract for EvmCollectionHelpers {...} diff --git a/pallets/unique/src/lib.rs b/pallets/unique/src/lib.rs index c61940d0f8..1c098beb18 100644 --- a/pallets/unique/src/lib.rs +++ b/pallets/unique/src/lib.rs @@ -73,9 +73,9 @@ extern crate alloc; -pub use pallet::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; +pub use pallet::*; pub mod eth; #[cfg(feature = "runtime-benchmarks")] @@ -84,27 +84,27 @@ pub mod weights; #[frame_support::pallet] pub mod pallet { - use super::*; - - use frame_support::{dispatch::DispatchResult, ensure, fail, BoundedVec, storage::Key}; + use frame_support::{dispatch::DispatchResult, ensure, fail, storage::Key, BoundedVec}; + use frame_system::{ensure_root, ensure_signed}; + use pallet_common::{ + dispatch::{dispatch_tx, CollectionDispatch}, + CollectionHandle, CommonWeightInfo, Pallet as PalletCommon, RefungibleExtensionsWeightInfo, + }; + use pallet_evm::account::CrossAccountId; use scale_info::TypeInfo; - use frame_system::{ensure_signed, ensure_root}; use sp_std::{vec, vec::Vec}; use up_data_structs::{ - MAX_COLLECTION_NAME_LENGTH, MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_TOKEN_PREFIX_LENGTH, - MAX_PROPERTIES_PER_ITEM, MAX_PROPERTY_KEY_LENGTH, MAX_PROPERTY_VALUE_LENGTH, - MAX_COLLECTION_PROPERTIES_SIZE, COLLECTION_ADMINS_LIMIT, MAX_TOKEN_PROPERTIES_SIZE, - CreateItemData, CollectionLimits, CollectionPermissions, CollectionId, CollectionMode, - TokenId, CreateCollectionData, CreateItemExData, budget, Property, PropertyKey, - PropertyKeyPermission, - }; - use pallet_evm::account::CrossAccountId; - use pallet_common::{ - CollectionHandle, Pallet as PalletCommon, CommonWeightInfo, dispatch::dispatch_tx, - dispatch::CollectionDispatch, RefungibleExtensionsWeightInfo, + budget, CollectionId, CollectionLimits, CollectionMode, CollectionPermissions, + CreateCollectionData, CreateItemData, CreateItemExData, Property, PropertyKey, + PropertyKeyPermission, TokenId, COLLECTION_ADMINS_LIMIT, MAX_COLLECTION_DESCRIPTION_LENGTH, + MAX_COLLECTION_NAME_LENGTH, MAX_COLLECTION_PROPERTIES_SIZE, MAX_PROPERTIES_PER_ITEM, + MAX_PROPERTY_KEY_LENGTH, MAX_PROPERTY_VALUE_LENGTH, MAX_TOKEN_PREFIX_LENGTH, + MAX_TOKEN_PROPERTIES_SIZE, }; use weights::WeightInfo; + use super::*; + /// A maximum number of levels of depth in the token nesting tree. pub const NESTING_BUDGET: u32 = 5; diff --git a/primitives/app_promotion_rpc/src/lib.rs b/primitives/app_promotion_rpc/src/lib.rs index e381ed36d4..7af9da9d3a 100644 --- a/primitives/app_promotion_rpc/src/lib.rs +++ b/primitives/app_promotion_rpc/src/lib.rs @@ -16,12 +16,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::vec::Vec; -use codec::Decode; +use parity_scale_codec::Decode; use sp_runtime::{ - DispatchError, traits::{AtLeast32BitUnsigned, Member}, + DispatchError, }; +use sp_std::vec::Vec; type Result = core::result::Result; diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index 0ee8271b9b..11ed1903ed 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -14,13 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use sp_runtime::Perbill; +use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use frame_support::{ parameter_types, - weights::{Weight, constants::WEIGHT_REF_TIME_PER_SECOND}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; -use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; -use crate::types::{BlockNumber, Balance}; +use sp_runtime::Perbill; + +use crate::types::{Balance, BlockNumber}; pub const MILLISECS_PER_BLOCK: u64 = 12000; pub const MILLISECS_PER_RELAY_BLOCK: u64 = 6000; diff --git a/primitives/common/src/types.rs b/primitives/common/src/types.rs index 8cb4b266ce..06e751100f 100644 --- a/primitives/common/src/types.rs +++ b/primitives/common/src/types.rs @@ -16,7 +16,7 @@ use sp_runtime::{ generic, - traits::{Verify, IdentifyAccount}, + traits::{IdentifyAccount, Verify}, MultiSignature, }; @@ -27,7 +27,7 @@ use sp_runtime::{ pub mod opaque { pub use sp_runtime::{generic, traits::BlakeTwo256, OpaqueExtrinsic as UncheckedExtrinsic}; - pub use super::{BlockNumber, Signature, AccountId, Balance, Index, Hash, AuraId}; + pub use super::{AccountId, AuraId, Balance, BlockNumber, Hash, Signature}; #[derive(Debug, Clone)] pub enum RuntimeId { diff --git a/primitives/data-structs/src/bounded.rs b/primitives/data-structs/src/bounded.rs index 5e8d559fe0..7d8c70771a 100644 --- a/primitives/data-structs/src/bounded.rs +++ b/primitives/data-structs/src/bounded.rs @@ -17,12 +17,14 @@ //! This module contins implementations for support bounded structures ([`BoundedVec`], [`BoundedBTreeMap`], [`BoundedBTreeSet`]) in [`serde`]. use core::fmt; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::vec::Vec; use frame_support::{ - BoundedVec, storage::{bounded_btree_map::BoundedBTreeMap, bounded_btree_set::BoundedBTreeSet}, + BoundedVec, +}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, }; /// [`serde`] implementations for [`BoundedVec`]. 
diff --git a/primitives/data-structs/src/lib.rs b/primitives/data-structs/src/lib.rs index ec9073a321..74aeafeace 100644 --- a/primitives/data-structs/src/lib.rs +++ b/primitives/data-structs/src/lib.rs @@ -25,20 +25,21 @@ use core::{ fmt, ops::Deref, }; -use frame_support::storage::{bounded_btree_map::BoundedBTreeMap, bounded_btree_set::BoundedBTreeSet}; -#[cfg(feature = "serde")] -use serde::{Serialize, Deserialize}; - -use sp_core::U256; -use sp_runtime::{ArithmeticError, sp_std::prelude::Vec}; -use sp_std::collections::btree_set::BTreeSet; -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::{BoundedVec, traits::ConstU32}; +use bondrewd::Bitfields; use derivative::Derivative; -use scale_info::TypeInfo; use evm_coder::AbiCoderFlags; -use bondrewd::Bitfields; +use frame_support::{ + storage::{bounded_btree_map::BoundedBTreeMap, bounded_btree_set::BoundedBTreeSet}, + traits::ConstU32, + BoundedVec, +}; +use parity_scale_codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; +use sp_core::U256; +use sp_runtime::{sp_std::prelude::Vec, ArithmeticError}; +use sp_std::collections::btree_set::BTreeSet; mod bondrewd_codec; mod bounded; diff --git a/primitives/data-structs/src/mapping.rs b/primitives/data-structs/src/mapping.rs index 23d95a3dd2..142fc110f1 100644 --- a/primitives/data-structs/src/mapping.rs +++ b/primitives/data-structs/src/mapping.rs @@ -18,10 +18,10 @@ use core::marker::PhantomData; +use pallet_evm::account::CrossAccountId; use sp_core::H160; use crate::{CollectionId, TokenId}; -use pallet_evm::account::CrossAccountId; /// Trait for mapping between token id and some `Address`. pub trait TokenAddressMapping
<Address>
{ diff --git a/primitives/data-structs/src/migration.rs b/primitives/data-structs/src/migration.rs index cbcdbf43cc..7718a01f7e 100644 --- a/primitives/data-structs/src/migration.rs +++ b/primitives/data-structs/src/migration.rs @@ -17,8 +17,9 @@ /// Storage migration is not required for this change, as SponsoringRateLimit has same encoding as Option #[test] fn sponsoring_rate_limit_has_same_encoding_as_option_u32() { + use parity_scale_codec::Encode; + use crate::SponsoringRateLimit; - use codec::Encode; fn limit_to_option(limit: SponsoringRateLimit) -> Option { match limit { @@ -41,8 +42,9 @@ fn sponsoring_rate_limit_has_same_encoding_as_option_u32() { #[test] fn collection_flags_have_same_encoding_as_bool() { + use parity_scale_codec::Encode; + use crate::CollectionFlags; - use codec::Encode; assert_eq!( true.encode(), diff --git a/primitives/pov-estimate-rpc/src/lib.rs b/primitives/pov-estimate-rpc/src/lib.rs index 0481da616f..697a0f6046 100644 --- a/primitives/pov-estimate-rpc/src/lib.rs +++ b/primitives/pov-estimate-rpc/src/lib.rs @@ -17,12 +17,10 @@ #![cfg_attr(not(feature = "std"), no_std)] use scale_info::TypeInfo; -use sp_std::vec::Vec; - #[cfg(feature = "std")] use serde::Serialize; - use sp_runtime::ApplyExtrinsicResult; +use sp_std::vec::Vec; #[cfg_attr(feature = "std", derive(Serialize))] #[derive(Debug, TypeInfo)] diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index 8ef914ca52..1deba23a98 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -18,15 +18,14 @@ extern crate alloc; +use parity_scale_codec::Decode; +use sp_runtime::DispatchError; +use sp_std::vec::Vec; use up_data_structs::{ - CollectionId, TokenId, RawEncoded, RpcCollection, CollectionStats, CollectionLimits, Property, - PropertyKeyPermission, TokenData, TokenChild, TokenDataVersion1, + CollectionId, CollectionLimits, CollectionStats, Property, PropertyKeyPermission, + RpcCollection, TokenChild, TokenData, TokenId, }; -use sp_std::vec::Vec; -use codec::Decode; -use sp_runtime::DispatchError; - type Result = core::result::Result; sp_api::decl_runtime_apis! 
{ diff --git a/runtime/common/config/ethereum.rs b/runtime/common/config/ethereum.rs index b70e875ccc..bfe3da6d82 100644 --- a/runtime/common/config/ethereum.rs +++ b/runtime/common/config/ethereum.rs @@ -1,22 +1,24 @@ -use sp_core::{U256, H160}; use frame_support::{ - weights::{Weight, constants::WEIGHT_REF_TIME_PER_SECOND}, - traits::{FindAuthor}, - parameter_types, ConsensusEngineId, + parameter_types, + traits::FindAuthor, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, + ConsensusEngineId, }; -use sp_runtime::{RuntimeAppPublic, Perbill, traits::ConstU32}; +use pallet_ethereum::PostLogContent; +use pallet_evm::{EnsureAddressTruncated, HashedAddressMapping}; +use sp_core::{H160, U256}; +use sp_runtime::{traits::ConstU32, Perbill, RuntimeAppPublic}; +use up_common::constants::*; + use crate::{ runtime_common::{ config::sponsoring::DefaultSponsoringRateLimit, - DealWithFees, dispatch::CollectionDispatchT, ethereum::{precompiles::UniquePrecompiles, sponsoring::EvmSponsorshipHandler}, + DealWithFees, }, - Runtime, Aura, Balances, RuntimeEvent, ChainId, + Aura, Balances, ChainId, Runtime, RuntimeEvent, }; -use pallet_evm::{EnsureAddressTruncated, HashedAddressMapping}; -use pallet_ethereum::PostLogContent; -use up_common::constants::*; pub type CrossAccountId = pallet_evm::account::BasicCrossAccountId; diff --git a/runtime/common/config/governance/fellowship.rs b/runtime/common/config/governance/fellowship.rs index 6274590b06..ebeeedd09e 100644 --- a/runtime/common/config/governance/fellowship.rs +++ b/runtime/common/config/governance/fellowship.rs @@ -1,8 +1,11 @@ -use crate::{Preimage, Treasury, RuntimeCall, RuntimeEvent, Scheduler, FellowshipReferenda, Runtime}; -use super::*; use pallet_gov_origins::Origin as GovOrigins; use pallet_ranked_collective::{Config as RankedConfig, Rank, TallyOf}; +use super::*; +use crate::{ + FellowshipReferenda, Preimage, Runtime, RuntimeCall, RuntimeEvent, Scheduler, Treasury, +}; + pub const FELLOWSHIP_MODULE_ID: PalletId = PalletId(*b"flowship"); pub const DEMOCRACY_TRACK_ID: u16 = 10; diff --git a/runtime/common/config/governance/mod.rs b/runtime/common/config/governance/mod.rs index adc3476c53..f522ef71e5 100644 --- a/runtime/common/config/governance/mod.rs +++ b/runtime/common/config/governance/mod.rs @@ -15,28 +15,30 @@ // along with Unique Network. If not, see . 
use frame_support::{ - PalletId, parameter_types, + pallet_prelude::*, + parameter_types, traits::{ - EnsureOrigin, EqualPrivilegeOnly, EitherOfDiverse, EitherOf, MapSuccess, ConstU16, Polling, + ConstU16, EitherOf, EitherOfDiverse, EnsureOrigin, EqualPrivilegeOnly, MapSuccess, Polling, }, weights::Weight, - pallet_prelude::*, + PalletId, }; -use frame_system::{EnsureRoot, EnsureNever}; +use frame_system::{EnsureNever, EnsureRoot}; +use pallet_collective::EnsureProportionAtLeast; use sp_runtime::{ - Perbill, - traits::{AccountIdConversion, ConstU32, Replace, CheckedSub, Convert}, morph_types, -}; -use crate::{ - Runtime, RuntimeOrigin, RuntimeEvent, RuntimeCall, OriginCaller, Preimage, Balances, Treasury, - Scheduler, Council, TechnicalCommittee, + traits::{AccountIdConversion, CheckedSub, ConstU32, Convert, Replace}, + Perbill, }; pub use up_common::{ - constants::{UNIQUE, DAYS, HOURS, MINUTES, CENTIUNIQUE}, + constants::{CENTIUNIQUE, DAYS, HOURS, MINUTES, UNIQUE}, types::{AccountId, Balance, BlockNumber}, }; -use pallet_collective::EnsureProportionAtLeast; + +use crate::{ + Balances, Council, OriginCaller, Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + Scheduler, TechnicalCommittee, Treasury, +}; pub mod council; pub use council::*; diff --git a/runtime/common/config/orml.rs b/runtime/common/config/orml.rs index d57b88f572..bf1bf2aac8 100644 --- a/runtime/common/config/orml.rs +++ b/runtime/common/config/orml.rs @@ -20,26 +20,26 @@ use frame_support::{ }; use frame_system::EnsureSigned; use orml_traits::{location::AbsoluteReserveProvider, parameter_type_with_key}; +use pallet_foreign_assets::{CurrencyId, NativeCurrency}; use sp_runtime::traits::Convert; -use xcm::latest::{Weight, Junction::*, Junctions::*, MultiLocation}; -use xcm_executor::XcmExecutor; use sp_std::{vec, vec::Vec}; -use pallet_foreign_assets::{CurrencyId, NativeCurrency}; +use staging_xcm::latest::{Junction::*, Junctions::*, MultiLocation, Weight}; +use staging_xcm_executor::XcmExecutor; +use up_common::{ + constants::*, + types::{AccountId, Balance}, +}; + use crate::{ - Runtime, RuntimeEvent, RelayChainBlockNumberProvider, runtime_common::config::{ - xcm::{ - SelfLocation, Weigher, XcmExecutorConfig, UniversalLocation, - xcm_assets::{CurrencyIdConvert}, - }, pallets::TreasuryAccountId, substrate::{MaxLocks, MaxReserves}, + xcm::{ + xcm_assets::CurrencyIdConvert, SelfLocation, UniversalLocation, Weigher, + XcmExecutorConfig, + }, }, -}; - -use up_common::{ - types::{AccountId, Balance}, - constants::*, + RelayChainBlockNumberProvider, Runtime, RuntimeEvent, }; // Signed version of balance diff --git a/runtime/common/config/pallets/app_promotion.rs b/runtime/common/config/pallets/app_promotion.rs index 39a9add46f..ff2e6fd765 100644 --- a/runtime/common/config/pallets/app_promotion.rs +++ b/runtime/common/config/pallets/app_promotion.rs @@ -14,18 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use crate::{ - runtime_common::config::pallets::{TreasuryAccountId, RelayChainBlockNumberProvider}, - Runtime, Balances, BlockNumber, Unique, RuntimeEvent, EvmContractHelpers, Maintenance, -}; - use frame_support::{parameter_types, PalletId}; use sp_arithmetic::Perbill; use up_common::{ - constants::{UNIQUE, DAYS, RELAY_DAYS}, + constants::{DAYS, RELAY_DAYS, UNIQUE}, types::Balance, }; +use crate::{ + runtime_common::config::pallets::{RelayChainBlockNumberProvider, TreasuryAccountId}, + Balances, BlockNumber, EvmContractHelpers, Maintenance, Runtime, RuntimeEvent, Unique, +}; + parameter_types! { pub const AppPromotionId: PalletId = PalletId(*b"appstake"); pub const RecalculationInterval: BlockNumber = RELAY_DAYS; diff --git a/runtime/common/config/pallets/collator_selection.rs b/runtime/common/config/pallets/collator_selection.rs index ef3863a5e1..86a8a08bda 100644 --- a/runtime/common/config/pallets/collator_selection.rs +++ b/runtime/common/config/pallets/collator_selection.rs @@ -15,23 +15,21 @@ // along with Unique Network. If not, see . use frame_support::{parameter_types, PalletId}; -use crate::{ - Balance, Balances, BlockNumber, Runtime, RuntimeEvent, Aura, Session, SessionKeys, - CollatorSelection, Treasury, - config::pallets::{MaxCollators, SessionPeriod, TreasuryAccountId}, +#[cfg(not(feature = "governance"))] +use frame_system::EnsureRoot; +use pallet_configuration::{ + CollatorSelectionDesiredCollatorsOverride, CollatorSelectionKickThresholdOverride, + CollatorSelectionLicenseBondOverride, }; +use sp_runtime::Perbill; +use up_common::constants::{MILLIUNIQUE, UNIQUE}; #[cfg(feature = "governance")] use crate::config::governance; - -#[cfg(not(feature = "governance"))] -use frame_system::EnsureRoot; - -use sp_runtime::Perbill; -use up_common::constants::{UNIQUE, MILLIUNIQUE}; -use pallet_configuration::{ - CollatorSelectionKickThresholdOverride, CollatorSelectionLicenseBondOverride, - CollatorSelectionDesiredCollatorsOverride, +use crate::{ + config::pallets::{MaxCollators, SessionPeriod, TreasuryAccountId}, + Aura, Balance, Balances, BlockNumber, CollatorSelection, Runtime, RuntimeEvent, + RuntimeHoldReason, Session, SessionKeys, Treasury, }; parameter_types! { pub const SessionOffset: BlockNumber = 0; diff --git a/runtime/common/config/pallets/foreign_asset.rs b/runtime/common/config/pallets/foreign_asset.rs index a93e3679fc..af9d78b402 100644 --- a/runtime/common/config/pallets/foreign_asset.rs +++ b/runtime/common/config/pallets/foreign_asset.rs @@ -1,6 +1,7 @@ -use crate::{Runtime, RuntimeEvent, Balances}; use up_common::types::AccountId; +use crate::{Balances, Runtime, RuntimeEvent}; + impl pallet_foreign_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; diff --git a/runtime/common/config/pallets/mod.rs b/runtime/common/config/pallets/mod.rs index fca5f60010..caba1b889f 100644 --- a/runtime/common/config/pallets/mod.rs +++ b/runtime/common/config/pallets/mod.rs @@ -15,29 +15,30 @@ // along with Unique Network. If not, see . 
use alloc::string::{String, ToString}; -use frame_support::parameter_types; + +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, Currency}, +}; +use sp_arithmetic::Perbill; use sp_runtime::traits::AccountIdConversion; +use up_common::{ + constants::*, + types::{AccountId, Balance, BlockNumber}, +}; +use up_data_structs::mapping::{CrossTokenAddressMapping, EvmTokenAddressMapping}; + +#[cfg(feature = "governance")] +use crate::runtime_common::config::governance; use crate::{ runtime_common::{ + config::{ethereum::EvmCollectionHelpersAddress, substrate::TreasuryModuleId}, dispatch::CollectionDispatchT, - config::{substrate::TreasuryModuleId, ethereum::EvmCollectionHelpersAddress}, weights::CommonWeights, RelayChainBlockNumberProvider, }, - Runtime, RuntimeEvent, RuntimeCall, VERSION, TOKEN_SYMBOL, DECIMALS, Balances, + Balances, Runtime, RuntimeCall, RuntimeEvent, DECIMALS, TOKEN_SYMBOL, VERSION, }; -use frame_support::traits::{ConstU32, ConstU64, Currency}; -use up_common::{ - types::{AccountId, Balance, BlockNumber}, - constants::*, -}; -use up_data_structs::{ - mapping::{EvmTokenAddressMapping, CrossTokenAddressMapping}, -}; -use sp_arithmetic::Perbill; - -#[cfg(feature = "governance")] -use crate::runtime_common::config::governance; #[cfg(feature = "unique-scheduler")] pub mod scheduler; diff --git a/runtime/common/config/pallets/preimage.rs b/runtime/common/config/pallets/preimage.rs index 1426acb5ce..f60c09cdd2 100644 --- a/runtime/common/config/pallets/preimage.rs +++ b/runtime/common/config/pallets/preimage.rs @@ -16,9 +16,10 @@ use frame_support::parameter_types; use frame_system::EnsureRoot; -use crate::{AccountId, Balance, Balances, Runtime, RuntimeEvent}; use up_common::constants::*; +use crate::{AccountId, Balance, Balances, Runtime, RuntimeEvent}; + parameter_types! { pub PreimageBaseDeposit: Balance = 1000 * UNIQUE; } diff --git a/runtime/common/config/pallets/scheduler.rs b/runtime/common/config/pallets/scheduler.rs index f850a3c800..7035cfa10b 100644 --- a/runtime/common/config/pallets/scheduler.rs +++ b/runtime/common/config/pallets/scheduler.rs @@ -14,21 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . +use core::cmp::Ordering; + use frame_support::{ - traits::{PrivilegeCmp, EnsureOrigin}, - weights::Weight, parameter_types, + traits::{EnsureOrigin, PrivilegeCmp}, + weights::Weight, }; use frame_system::{EnsureRoot, RawOrigin}; +use pallet_unique_scheduler_v2::ScheduledEnsureOriginSuccess; +use parity_scale_codec::Decode; use sp_runtime::Perbill; -use core::cmp::Ordering; -use codec::Decode; +use up_common::types::AccountId; + use crate::{ - runtime_common::{scheduler::SchedulerPaymentExecutor, config::substrate::RuntimeBlockWeights}, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, OriginCaller, + runtime_common::{config::substrate::RuntimeBlockWeights, scheduler::SchedulerPaymentExecutor}, + OriginCaller, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, }; -use pallet_unique_scheduler_v2::ScheduledEnsureOriginSuccess; -use up_common::types::AccountId; parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(50) * diff --git a/runtime/common/config/parachain.rs b/runtime/common/config/parachain.rs index 5618e4bb17..ad42e39958 100644 --- a/runtime/common/config/parachain.rs +++ b/runtime/common/config/parachain.rs @@ -14,10 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use frame_support::{weights::Weight, parameter_types}; -use crate::{Runtime, RuntimeEvent, XcmpQueue, DmpQueue}; +use frame_support::{parameter_types, weights::Weight}; use up_common::constants::*; +use crate::{DmpQueue, Runtime, RuntimeEvent, XcmpQueue}; + parameter_types! { pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); diff --git a/runtime/common/config/sponsoring.rs b/runtime/common/config/sponsoring.rs index be421b40d3..cb92fb10ee 100644 --- a/runtime/common/config/sponsoring.rs +++ b/runtime/common/config/sponsoring.rs @@ -14,14 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use crate::{ - runtime_common::{sponsoring::UniqueSponsorshipHandler}, - Runtime, -}; use frame_support::parameter_types; use sp_core::U256; use up_common::{constants::*, types::BlockNumber}; +use crate::{runtime_common::sponsoring::UniqueSponsorshipHandler, Runtime}; + parameter_types! { pub const DefaultSponsoringRateLimit: BlockNumber = 1 * DAYS; pub const DefaultSponsoringFeeLimit: U256 = U256::MAX; diff --git a/runtime/common/config/substrate.rs b/runtime/common/config/substrate.rs index c0623bc0dd..f3f1b6344d 100644 --- a/runtime/common/config/substrate.rs +++ b/runtime/common/config/substrate.rs @@ -15,31 +15,32 @@ // along with Unique Network. If not, see . use frame_support::{ - traits::{Everything, ConstU32, NeverEnsureOrigin}, + dispatch::DispatchClass, + ord_parameter_types, parameter_types, + traits::{ConstBool, ConstU32, Everything, NeverEnsureOrigin}, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}, ConstantMultiplier, }, - dispatch::DispatchClass, - parameter_types, ord_parameter_types, PalletId, -}; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, AccountIdLookup}, - Perbill, Permill, Percent, + PalletId, }; -use sp_arithmetic::traits::One; use frame_system::{ limits::{BlockLength, BlockWeights}, EnsureRoot, EnsureSignedBy, }; -use pallet_transaction_payment::{Multiplier, ConstFeeMultiplier}; -use crate::{ - runtime_common::DealWithFees, Runtime, RuntimeEvent, RuntimeCall, RuntimeOrigin, OriginCaller, - PalletInfo, System, Balances, SS58Prefix, Version, +use pallet_transaction_payment::{ConstFeeMultiplier, Multiplier}; +use sp_arithmetic::traits::One; +use sp_runtime::{ + traits::{AccountIdLookup, BlakeTwo256}, + Perbill, Percent, Permill, }; -use up_common::{types::*, constants::*}; use sp_std::vec; +use up_common::{constants::*, types::*}; + +use crate::{ + runtime_common::DealWithFees, Balances, Block, OriginCaller, PalletInfo, Runtime, RuntimeCall, + RuntimeEvent, RuntimeHoldReason, RuntimeOrigin, SS58Prefix, System, Version, +}; parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/runtime/common/config/test_pallets.rs b/runtime/common/config/test_pallets.rs index 60441349ad..22c39f96a0 100644 --- a/runtime/common/config/test_pallets.rs +++ b/runtime/common/config/test_pallets.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use crate::{Runtime, RuntimeEvent, RuntimeCall}; +use crate::{Runtime, RuntimeCall, RuntimeEvent}; impl pallet_test_utils::Config for Runtime { type RuntimeEvent = RuntimeEvent; diff --git a/runtime/common/config/xcm/foreignassets.rs b/runtime/common/config/xcm/foreignassets.rs index 02f77f685a..e33b583284 100644 --- a/runtime/common/config/xcm/foreignassets.rs +++ b/runtime/common/config/xcm/foreignassets.rs @@ -14,23 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_support::{traits::Get, parameter_types}; -use sp_runtime::traits::Convert; -use xcm::latest::{prelude::*, MultiAsset, MultiLocation}; -use xcm_builder::{FungiblesAdapter, NoChecking, ConvertedConcreteId}; -use xcm_executor::traits::{TransactAsset, Convert as ConvertXcm, JustTry}; -use pallet_foreign_assets::{ - AssetIds, AssetIdMapping, XcmForeignAssetIdMapping, NativeCurrency, FreeForAll, TryAsForeign, - ForeignAssetId, CurrencyId, -}; -use sp_std::{borrow::Borrow, marker::PhantomData}; +use frame_support::{parameter_types, traits::Get}; use orml_traits::location::AbsoluteReserveProvider; use orml_xcm_support::MultiNativeAsset; -use crate::{Runtime, Balances, ParachainInfo, PolkadotXcm, ForeignAssets}; +use pallet_foreign_assets::{ + AssetId, AssetIdMapping, CurrencyId, ForeignAssetId, FreeForAll, NativeCurrency, TryAsForeign, + XcmForeignAssetIdMapping, +}; +use sp_runtime::traits::{Convert, MaybeEquivalence}; +use sp_std::marker::PhantomData; +use staging_xcm::latest::{prelude::*, MultiAsset, MultiLocation}; +use staging_xcm_builder::{ConvertedConcreteId, FungiblesAdapter, NoChecking}; +use staging_xcm_executor::traits::{JustTry, TransactAsset}; +use up_common::types::{AccountId, Balance}; use super::{LocationToAccountId, RelayLocation}; - -use up_common::types::{AccountId, Balance}; +use crate::{Balances, ForeignAssets, ParachainInfo, PolkadotXcm, Runtime}; parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); diff --git a/runtime/common/config/xcm/mod.rs b/runtime/common/config/xcm/mod.rs index 130355f6cb..c810babe2e 100644 --- a/runtime/common/config/xcm/mod.rs +++ b/runtime/common/config/xcm/mod.rs @@ -15,28 +15,33 @@ // along with Unique Network. If not, see . 
use frame_support::{ - traits::{Everything, Nothing, Get, ConstU32, ProcessMessageError, Contains}, parameter_types, + traits::{ConstU32, Contains, Everything, Get, Nothing, ProcessMessageError}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use polkadot_parachain::primitives::Sibling; -use xcm::latest::{prelude::*, Weight, MultiLocation}; -use xcm::v3::Instruction; -use xcm_builder::{ - AccountId32Aliases, EnsureXcmOrigin, FixedWeightBounds, ParentAsSuperuser, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, ParentIsPreset, -}; -use xcm_executor::{XcmExecutor, traits::ShouldExecute}; +use polkadot_parachain_primitives::primitives::Sibling; use sp_std::marker::PhantomData; -use crate::{ - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ParachainInfo, ParachainSystem, PolkadotXcm, - XcmpQueue, xcm_barrier::Barrier, RelayNetwork, AllPalletsWithSystem, Balances, +use staging_xcm::{ + latest::{prelude::*, MultiLocation, Weight}, + v3::Instruction, +}; +use staging_xcm_builder::{ + AccountId32Aliases, EnsureXcmOrigin, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, +}; +use staging_xcm_executor::{ + traits::{Properties, ShouldExecute}, + XcmExecutor, }; - use up_common::types::AccountId; +use crate::{ + xcm_barrier::Barrier, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, + PolkadotXcm, RelayNetwork, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, XcmpQueue, +}; + #[cfg(feature = "foreign-assets")] pub mod foreignassets; @@ -45,15 +50,13 @@ pub mod nativeassets; #[cfg(feature = "foreign-assets")] pub use foreignassets as xcm_assets; - #[cfg(not(feature = "foreign-assets"))] pub use nativeassets as xcm_assets; +use xcm_assets::{AssetTransactor, IsReserve, Trader}; #[cfg(feature = "governance")] use crate::runtime_common::config::governance; -use xcm_assets::{AssetTransactor, IsReserve, Trader}; - parameter_types! { pub const RelayLocation: MultiLocation = MultiLocation::parent(); pub RelayOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); diff --git a/runtime/common/config/xcm/nativeassets.rs b/runtime/common/config/xcm/nativeassets.rs index c721eb4385..71c4c7b7cb 100644 --- a/runtime/common/config/xcm/nativeassets.rs +++ b/runtime/common/config/xcm/nativeassets.rs @@ -14,31 +14,28 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
+use cumulus_primitives_core::XcmContext; use frame_support::{ - traits::{tokens::currency::Currency as CurrencyT, OnUnbalanced as OnUnbalancedT, Get}, + traits::{tokens::currency::Currency as CurrencyT, Get, OnUnbalanced as OnUnbalancedT}, weights::WeightToFeePolynomial, }; -use sp_runtime::traits::{CheckedConversion, Zero, Convert}; -use xcm::latest::{ - AssetId::{Concrete}, - Fungibility::Fungible as XcmFungible, - MultiAsset, Error as XcmError, Weight, - Junction::*, - MultiLocation, - Junctions::*, +use pallet_foreign_assets::{AssetIds, NativeCurrency}; +use sp_runtime::traits::{CheckedConversion, Convert, Zero}; +use sp_std::marker::PhantomData; +use staging_xcm::latest::{ + AssetId::Concrete, Error as XcmError, Fungibility::Fungible as XcmFungible, Junction::*, + Junctions::*, MultiAsset, MultiLocation, Weight, }; -use xcm_builder::{CurrencyAdapter, NativeAsset}; -use xcm_executor::{ - Assets, +use staging_xcm_builder::{CurrencyAdapter, NativeAsset}; +use staging_xcm_executor::{ traits::{MatchesFungible, WeightTrader}, + Assets, }; -use pallet_foreign_assets::{AssetIds, NativeCurrency}; -use sp_std::marker::PhantomData; -use crate::{Balances, ParachainInfo}; -use super::{LocationToAccountId, RelayLocation}; - use up_common::types::{AccountId, Balance}; +use super::{LocationToAccountId, RelayLocation}; +use crate::{Balances, ParachainInfo}; + pub struct OnlySelfCurrency; impl> MatchesFungible for OnlySelfCurrency { fn matches_fungible(a: &MultiAsset) -> Option { diff --git a/runtime/common/dispatch.rs b/runtime/common/dispatch.rs index 4f17069bc5..5f4cea73b1 100644 --- a/runtime/common/dispatch.rs +++ b/runtime/common/dispatch.rs @@ -15,29 +15,28 @@ // along with Unique Network. If not, see . use frame_support::{dispatch::DispatchResult, ensure, fail}; -use pallet_evm::{PrecompileHandle, PrecompileResult}; -use sp_core::H160; -use sp_runtime::DispatchError; -use sp_std::{borrow::ToOwned, vec::Vec}; +use pallet_balances_adapter::NativeFungibleHandle; +pub use pallet_common::dispatch::CollectionDispatch; +#[cfg(not(feature = "refungible"))] +use pallet_common::unsupported; use pallet_common::{ - CollectionById, CollectionHandle, CommonCollectionOperations, erc::CommonEvmHandler, - eth::map_eth_to_id, + erc::CommonEvmHandler, eth::map_eth_to_id, CollectionById, CollectionHandle, + CommonCollectionOperations, }; -pub use pallet_common::dispatch::CollectionDispatch; -use pallet_fungible::{Pallet as PalletFungible, FungibleHandle}; -use pallet_balances_adapter::NativeFungibleHandle; -use pallet_nonfungible::{Pallet as PalletNonfungible, NonfungibleHandle}; +use pallet_evm::{PrecompileHandle, PrecompileResult}; +use pallet_fungible::{FungibleHandle, Pallet as PalletFungible}; +use pallet_nonfungible::{NonfungibleHandle, Pallet as PalletNonfungible}; use pallet_refungible::{ - Pallet as PalletRefungible, RefungibleHandle, erc_token::RefungibleTokenHandle, + erc_token::RefungibleTokenHandle, Pallet as PalletRefungible, RefungibleHandle, }; +use sp_core::H160; +use sp_runtime::DispatchError; +use sp_std::{borrow::ToOwned, vec::Vec}; use up_data_structs::{ - CollectionMode, CreateCollectionData, MAX_DECIMAL_POINTS, mapping::TokenAddressMapping, - CollectionId, + mapping::TokenAddressMapping, CollectionId, CollectionMode, CreateCollectionData, + MAX_DECIMAL_POINTS, }; -#[cfg(not(feature = "refungible"))] -use pallet_common::unsupported; - pub enum CollectionDispatchT where T: pallet_fungible::Config diff --git a/runtime/common/ethereum/precompiles/mod.rs 
b/runtime/common/ethereum/precompiles/mod.rs index a243a03a0b..af52cb21eb 100644 --- a/runtime/common/ethereum/precompiles/mod.rs +++ b/runtime/common/ethereum/precompiles/mod.rs @@ -14,11 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use pallet_evm::{Precompile, PrecompileHandle, PrecompileResult, PrecompileSet, IsPrecompileResult}; +use pallet_evm::{ + IsPrecompileResult, Precompile, PrecompileHandle, PrecompileResult, PrecompileSet, +}; +use pallet_evm_precompile_simple::ECRecover; use sp_core::H160; use sp_std::marker::PhantomData; - -use pallet_evm_precompile_simple::{ECRecover}; use sr25519::Sr25519Precompile; mod sr25519; diff --git a/runtime/common/ethereum/precompiles/sr25519.rs b/runtime/common/ethereum/precompiles/sr25519.rs index ea3b273811..9d8fd23f40 100644 --- a/runtime/common/ethereum/precompiles/sr25519.rs +++ b/runtime/common/ethereum/precompiles/sr25519.rs @@ -17,8 +17,7 @@ use fp_evm::{Context, ExitSucceed, PrecompileHandle, PrecompileOutput}; use pallet_evm::Precompile; use sp_core::{crypto::UncheckedFrom, sr25519, H256}; -use sp_std::marker::PhantomData; -use sp_std::prelude::*; +use sp_std::{marker::PhantomData, prelude::*}; use super::utils::{Bytes, EvmDataReader, EvmDataWriter, EvmResult, FunctionModifier, Gasometer}; diff --git a/runtime/common/ethereum/precompiles/utils/data.rs b/runtime/common/ethereum/precompiles/utils/data.rs index 4ff95193f0..9b0160f6c5 100644 --- a/runtime/common/ethereum/precompiles/utils/data.rs +++ b/runtime/common/ethereum/precompiles/utils/data.rs @@ -16,12 +16,12 @@ // You should have received a copy of the GNU General Public License // along with Utils. If not, see . -use super::{EvmResult, Gasometer}; - -use sp_std::borrow::ToOwned; use core::{any::type_name, ops::Range}; + use sp_core::{H160, H256, U256}; -use sp_std::{convert::TryInto, vec, vec::Vec}; +use sp_std::{borrow::ToOwned, convert::TryInto, vec, vec::Vec}; + +use super::{EvmResult, Gasometer}; /// The `address` type of Solidity. /// H160 could represent 2 types of data (bytes20 and address) that are not encoded the same way. diff --git a/runtime/common/ethereum/precompiles/utils/macro/src/lib.rs b/runtime/common/ethereum/precompiles/utils/macro/src/lib.rs index f9437fe043..0487280d9c 100644 --- a/runtime/common/ethereum/precompiles/utils/macro/src/lib.rs +++ b/runtime/common/ethereum/precompiles/utils/macro/src/lib.rs @@ -19,11 +19,12 @@ #![crate_type = "proc-macro"] extern crate proc_macro; +use std::convert::TryInto; + use proc_macro::TokenStream; use proc_macro2::Literal; use quote::{quote, quote_spanned}; use sha3::{Digest, Keccak256}; -use std::convert::TryInto; use syn::{parse_macro_input, spanned::Spanned, Expr, ExprLit, Ident, ItemEnum, Lit}; /// This macro allows to associate to each variant of an enumeration a discriminant (of type u32 diff --git a/runtime/common/ethereum/precompiles/utils/mod.rs b/runtime/common/ethereum/precompiles/utils/mod.rs index 84582c7aca..ca63811efe 100644 --- a/runtime/common/ethereum/precompiles/utils/mod.rs +++ b/runtime/common/ethereum/precompiles/utils/mod.rs @@ -16,10 +16,9 @@ // You should have received a copy of the GNU General Public License // along with Utils. If not, see . 
-use sp_std::borrow::ToOwned; use fp_evm::{Context, ExitRevert, PrecompileFailure}; use sp_core::U256; -use sp_std::marker::PhantomData; +use sp_std::{borrow::ToOwned, marker::PhantomData}; mod data; diff --git a/runtime/common/ethereum/self_contained_call.rs b/runtime/common/ethereum/self_contained_call.rs index 7a5d245776..131c01a8c7 100644 --- a/runtime/common/ethereum/self_contained_call.rs +++ b/runtime/common/ethereum/self_contained_call.rs @@ -16,10 +16,11 @@ use sp_core::H160; use sp_runtime::{ - traits::{Dispatchable, DispatchInfoOf, PostDispatchInfoOf}, - transaction_validity::{TransactionValidityError, TransactionValidity, InvalidTransaction}, + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf}, + transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, }; -use crate::{RuntimeOrigin, RuntimeCall, Maintenance}; + +use crate::{Maintenance, RuntimeCall, RuntimeOrigin}; impl fp_self_contained::SelfContainedCall for RuntimeCall { type SignedInfo = H160; diff --git a/runtime/common/ethereum/sponsoring.rs b/runtime/common/ethereum/sponsoring.rs index d6c220b8a3..18f90a62e7 100644 --- a/runtime/common/ethereum/sponsoring.rs +++ b/runtime/common/ethereum/sponsoring.rs @@ -17,34 +17,35 @@ //! Implements EVM sponsoring logic via TransactionValidityHack use core::{convert::TryInto, marker::PhantomData}; -use evm_coder::{Call}; -use pallet_common::{CollectionHandle, eth::map_eth_to_id}; + +use evm_coder::Call; +use pallet_common::{eth::map_eth_to_id, CollectionHandle}; use pallet_evm::account::CrossAccountId; use pallet_evm_transaction_payment::CallContext; +use pallet_fungible::{ + erc::{ERC20Call, UniqueFungibleCall}, + Config as FungibleConfig, +}; use pallet_nonfungible::{ - Config as NonfungibleConfig, Pallet as NonfungiblePallet, NonfungibleHandle, erc::{ - UniqueNFTCall, ERC721UniqueExtensionsCall, ERC721UniqueMintableCall, ERC721Call, - TokenPropertiesCall, + ERC721Call, ERC721UniqueExtensionsCall, ERC721UniqueMintableCall, TokenPropertiesCall, + UniqueNFTCall, }, -}; -use pallet_fungible::{ - Config as FungibleConfig, - erc::{UniqueFungibleCall, ERC20Call}, + Config as NonfungibleConfig, NonfungibleHandle, Pallet as NonfungiblePallet, }; use pallet_refungible::{ - Config as RefungibleConfig, erc::UniqueRefungibleCall, erc_token::{RefungibleTokenHandle, UniqueRefungibleTokenCall}, - RefungibleHandle, + Config as RefungibleConfig, RefungibleHandle, }; use pallet_unique::Config as UniqueConfig; use sp_std::prelude::*; use up_data_structs::{ - CollectionMode, CreateItemData, CreateNftData, mapping::TokenAddressMapping, TokenId, + mapping::TokenAddressMapping, CollectionMode, CreateItemData, CreateNftData, TokenId, }; use up_sponsorship::SponsorshipHandler; -use crate::{Runtime, runtime_common::sponsoring::*}; + +use crate::{runtime_common::sponsoring::*, Runtime}; mod refungible; @@ -206,9 +207,9 @@ where } mod common { - use super::*; + use pallet_common::erc::CollectionCall; - use pallet_common::erc::{CollectionCall}; + use super::*; pub fn collection_call_sponsor( call: CollectionCall, diff --git a/runtime/common/ethereum/sponsoring/refungible.rs b/runtime/common/ethereum/sponsoring/refungible.rs index 1597f5c9d1..c0cc6b451a 100644 --- a/runtime/common/ethereum/sponsoring/refungible.rs +++ b/runtime/common/ethereum/sponsoring/refungible.rs @@ -19,14 +19,7 @@ use pallet_common::CollectionHandle; use pallet_evm::account::CrossAccountId; use pallet_fungible::Config as FungibleConfig; -use pallet_refungible::Config as RefungibleConfig; use 
pallet_nonfungible::Config as NonfungibleConfig; -use pallet_unique::Config as UniqueConfig; -use up_data_structs::{CreateItemData, CreateNftData, TokenId}; - -use super::common; -use crate::runtime_common::sponsoring::*; - use pallet_refungible::{ erc::{ ERC721BurnableCall, ERC721Call, ERC721EnumerableCall, ERC721MetadataCall, @@ -37,7 +30,13 @@ use pallet_refungible::{ ERC1633Call, ERC20Call, ERC20UniqueExtensionsCall, RefungibleTokenHandle, UniqueRefungibleTokenCall, }, + Config as RefungibleConfig, }; +use pallet_unique::Config as UniqueConfig; +use up_data_structs::{CreateItemData, CreateNftData, TokenId}; + +use super::common; +use crate::runtime_common::sponsoring::*; pub fn call_sponsor( call: UniqueRefungibleCall, diff --git a/runtime/common/identity.rs b/runtime/common/identity.rs index 330095a86d..b0c03cf483 100644 --- a/runtime/common/identity.rs +++ b/runtime/common/identity.rs @@ -14,18 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; -use codec::{Encode, Decode}; -use up_common::types::AccountId; -use crate::RuntimeCall; - +#[cfg(feature = "collator-selection")] +use sp_runtime::transaction_validity::InvalidTransaction; use sp_runtime::{ traits::{DispatchInfoOf, SignedExtension}, - transaction_validity::{TransactionValidity, ValidTransaction, TransactionValidityError}, + transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, }; +use up_common::types::AccountId; -#[cfg(feature = "collator-selection")] -use sp_runtime::transaction_validity::InvalidTransaction; +use crate::RuntimeCall; #[derive(Debug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] pub struct DisableIdentityCalls; diff --git a/runtime/common/instance.rs b/runtime/common/instance.rs index 50bd42bcb8..2df06bcd14 100644 --- a/runtime/common/instance.rs +++ b/runtime/common/instance.rs @@ -1,9 +1,7 @@ -use crate::{ - runtime_common::{config::ethereum::CrossAccountId}, - Runtime, -}; use up_common::types::opaque::RuntimeInstance; +use crate::{runtime_common::config::ethereum::CrossAccountId, Runtime}; + impl RuntimeInstance for Runtime { type CrossAccountId = CrossAccountId; } diff --git a/runtime/common/maintenance.rs b/runtime/common/maintenance.rs index 96d206c502..f30a143725 100644 --- a/runtime/common/maintenance.rs +++ b/runtime/common/maintenance.rs @@ -14,17 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
+use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; -use codec::{Encode, Decode}; -use up_common::types::AccountId; -use crate::{RuntimeCall, Maintenance}; - use sp_runtime::{ traits::{DispatchInfoOf, SignedExtension}, transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionValidityError, + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, }; +use up_common::types::AccountId; + +use crate::{Maintenance, RuntimeCall}; #[derive(Debug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] pub struct CheckMaintenance; diff --git a/runtime/common/mod.rs b/runtime/common/mod.rs index 3a5d4040f4..932f2a031b 100644 --- a/runtime/common/mod.rs +++ b/runtime/common/mod.rs @@ -33,26 +33,23 @@ pub mod weights; #[cfg(test)] pub mod tests; -use sp_core::H160; use frame_support::{ - traits::{Currency, OnUnbalanced, Imbalance}, + traits::{Currency, Imbalance, OnUnbalanced}, weights::Weight, }; use sp_runtime::{ - generic, + generic, impl_opaque_keys, traits::{BlakeTwo256, BlockNumberProvider}, - impl_opaque_keys, }; use sp_std::vec::Vec; - #[cfg(feature = "std")] use sp_version::NativeVersion; +use up_common::types::{AccountId, BlockNumber}; use crate::{ - Runtime, RuntimeCall, Balances, Treasury, Aura, Signature, AllPalletsWithSystem, - InherentDataExt, + AllPalletsWithSystem, Aura, Balances, InherentDataExt, Runtime, RuntimeCall, Signature, + Treasury, }; -use up_common::types::{AccountId, BlockNumber}; #[macro_export] macro_rules! unsupported { @@ -175,7 +172,7 @@ impl cumulus_pallet_parachain_system::CheckInherents for CheckInherents { } } -#[derive(codec::Encode, codec::Decode)] +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode)] pub enum XCMPMessage { /// Transfer tokens to the given account from the Parachain account. TransferToken(XAccountId, XBalance), @@ -186,9 +183,10 @@ impl frame_support::traits::OnRuntimeUpgrade for AuraToCollatorSelection { fn on_runtime_upgrade() -> Weight { #[cfg(feature = "collator-selection")] { - use frame_support::{BoundedVec, storage::migration}; - use sp_runtime::{traits::OpaqueKeys, RuntimeAppPublic}; + use frame_support::{storage::migration, BoundedVec}; use pallet_session::SessionManager; + use sp_runtime::{traits::OpaqueKeys, RuntimeAppPublic}; + use crate::config::pallets::MaxCollators; let mut weight = ::DbWeight::get().reads(1); diff --git a/runtime/common/scheduler.rs b/runtime/common/scheduler.rs index 88ee4c7222..2c29e74fe9 100644 --- a/runtime/common/scheduler.rs +++ b/runtime/common/scheduler.rs @@ -14,20 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
-use frame_support::{ - dispatch::{GetDispatchInfo, PostDispatchInfo, DispatchInfo}, -}; +use fp_self_contained::SelfContainedCall; +use frame_support::dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}; +use pallet_transaction_payment::ChargeTransactionPayment; +use pallet_unique_scheduler_v2::DispatchCall; +use parity_scale_codec::Encode; use sp_runtime::{ - traits::{Dispatchable, Applyable, Member}, + traits::{Applyable, Dispatchable, Member}, transaction_validity::TransactionValidityError, DispatchErrorWithPostInfo, }; -use codec::Encode; -use crate::{Runtime, RuntimeCall, RuntimeOrigin, maintenance}; use up_common::types::AccountId; -use fp_self_contained::SelfContainedCall; -use pallet_unique_scheduler_v2::DispatchCall; -use pallet_transaction_payment::ChargeTransactionPayment; + +use crate::{maintenance, Runtime, RuntimeCall, RuntimeOrigin}; /// The SignedExtension to the basic transaction logic. pub type SignedExtraScheduler = ( diff --git a/runtime/common/sponsoring.rs b/runtime/common/sponsoring.rs index 74f894040a..3dccc7e02f 100644 --- a/runtime/common/sponsoring.rs +++ b/runtime/common/sponsoring.rs @@ -15,25 +15,25 @@ // along with Unique Network. If not, see . use core::marker::PhantomData; -use up_sponsorship::SponsorshipHandler; -use frame_support::{ - traits::{IsSubType}, -}; -use up_data_structs::{ - CollectionId, FUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, NFT_SPONSOR_TRANSFER_TIMEOUT, - REFUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, TokenId, CollectionMode, CreateItemData, -}; -use sp_runtime::traits::Saturating; -use pallet_common::{CollectionHandle}; + +use frame_support::traits::IsSubType; +use frame_system::pallet_prelude::*; +use pallet_common::CollectionHandle; use pallet_evm::account::CrossAccountId; -use pallet_unique::{ - Call as UniqueCall, Config as UniqueConfig, FungibleApproveBasket, RefungibleApproveBasket, - NftApproveBasket, CreateItemBasket, ReFungibleTransferBasket, FungibleTransferBasket, - NftTransferBasket, TokenPropertyBasket, -}; use pallet_fungible::Config as FungibleConfig; use pallet_nonfungible::Config as NonfungibleConfig; use pallet_refungible::Config as RefungibleConfig; +use pallet_unique::{ + Call as UniqueCall, Config as UniqueConfig, CreateItemBasket, FungibleApproveBasket, + FungibleTransferBasket, NftApproveBasket, NftTransferBasket, ReFungibleTransferBasket, + RefungibleApproveBasket, TokenPropertyBasket, +}; +use sp_runtime::traits::Saturating; +use up_data_structs::{ + CollectionId, CollectionMode, CreateItemData, TokenId, FUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, + NFT_SPONSOR_TRANSFER_TIMEOUT, REFUNGIBLE_SPONSOR_TRANSFER_TIMEOUT, +}; +use up_sponsorship::SponsorshipHandler; pub trait Config: UniqueConfig + FungibleConfig + NonfungibleConfig + RefungibleConfig {} impl Config for T where T: UniqueConfig + FungibleConfig + NonfungibleConfig + RefungibleConfig {} diff --git a/runtime/common/tests/mod.rs b/runtime/common/tests/mod.rs index fb07dad238..fd1a4c3af9 100644 --- a/runtime/common/tests/mod.rs +++ b/runtime/common/tests/mod.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . 
+use sp_core::{Pair, Public}; +pub use sp_runtime::AccountId32 as AccountId; use sp_runtime::{BuildStorage, Storage}; -use sp_core::{Public, Pair}; use up_common::types::AuraId; -use crate::{Runtime, GenesisConfig, ParachainInfoConfig, RuntimeEvent, System}; -pub use sp_runtime::AccountId32 as AccountId; +use crate::{BuildGenesisConfig, ParachainInfoConfig, Runtime, RuntimeEvent, System}; pub type Balance = u128; pub mod xcm; @@ -62,9 +62,10 @@ fn new_test_ext(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities #[cfg(feature = "collator-selection")] fn make_basic_storage() -> Storage { - use sp_core::{sr25519}; + use sp_core::sr25519; use sp_runtime::traits::{IdentifyAccount, Verify}; - use crate::{AccountId, Signature, SessionKeys, CollatorSelectionConfig, SessionConfig}; + + use crate::{AccountId, CollatorSelectionConfig, SessionConfig, SessionKeys, Signature}; type AccountPublic = ::Signer; diff --git a/runtime/common/tests/xcm.rs b/runtime/common/tests/xcm.rs index cbe7ef5dbb..ed8bbe5532 100644 --- a/runtime/common/tests/xcm.rs +++ b/runtime/common/tests/xcm.rs @@ -14,14 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use xcm::{ - VersionedXcm, +use frame_support::pallet_prelude::Weight; +use parity_scale_codec::Encode; +use staging_xcm::{ latest::{prelude::*, Error}, + VersionedXcm, }; -use codec::Encode; -use crate::{Runtime, RuntimeCall, RuntimeOrigin, RuntimeEvent, PolkadotXcm}; -use super::{new_test_ext, last_events, AccountId}; -use frame_support::{pallet_prelude::Weight}; + +use super::{last_events, new_test_ext, AccountId}; +use crate::{PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin}; const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); diff --git a/runtime/common/weights/mod.rs b/runtime/common/weights/mod.rs index 2de8cc1fe5..224695b17b 100644 --- a/runtime/common/weights/mod.rs +++ b/runtime/common/weights/mod.rs @@ -15,20 +15,21 @@ // along with Unique Network. If not, see . 
use core::marker::PhantomData; -use frame_support::{weights::Weight}; -use pallet_common::{CommonWeightInfo, dispatch::dispatch_weight, RefungibleExtensionsWeightInfo}; +use frame_support::weights::Weight; use pallet_balances_adapter::{ - Config as NativeFungibleConfig, common::CommonWeights as NativeFungibleWeights, + common::CommonWeights as NativeFungibleWeights, Config as NativeFungibleConfig, +}; +use pallet_common::{dispatch::dispatch_weight, CommonWeightInfo, RefungibleExtensionsWeightInfo}; +use pallet_fungible::{common::CommonWeights as FungibleWeights, Config as FungibleConfig}; +use pallet_nonfungible::{ + common::CommonWeights as NonfungibleWeights, Config as NonfungibleConfig, }; -use pallet_fungible::{Config as FungibleConfig, common::CommonWeights as FungibleWeights}; -use pallet_nonfungible::{Config as NonfungibleConfig, common::CommonWeights as NonfungibleWeights}; - #[cfg(feature = "refungible")] use pallet_refungible::{ - Config as RefungibleConfig, weights::WeightInfo, common::CommonWeights as RefungibleWeights, + common::CommonWeights as RefungibleWeights, weights::WeightInfo, Config as RefungibleConfig, }; -use up_data_structs::{CreateItemExData, CreateItemData}; +use up_data_structs::{CreateItemData, CreateItemExData}; pub mod xcm; diff --git a/runtime/opal/src/lib.rs b/runtime/opal/src/lib.rs index 46ff8248a7..25a8068705 100644 --- a/runtime/opal/src/lib.rs +++ b/runtime/opal/src/lib.rs @@ -27,15 +27,12 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); extern crate alloc; +use ::staging_xcm::latest::NetworkId; use frame_support::parameter_types; - -use sp_version::RuntimeVersion; use sp_runtime::create_runtime_str; - +use sp_version::RuntimeVersion; use up_common::types::*; -use ::xcm::latest::NetworkId; - mod runtime_common; pub mod governance_timings; diff --git a/runtime/opal/src/xcm_barrier.rs b/runtime/opal/src/xcm_barrier.rs index 44f664b78a..bd0d90b496 100644 --- a/runtime/opal/src/xcm_barrier.rs +++ b/runtime/opal/src/xcm_barrier.rs @@ -16,7 +16,7 @@ use frame_support::{match_types, traits::Everything}; use xcm::latest::{Junctions::*, MultiLocation}; -use xcm_builder::{AllowTopLevelPaidExecutionFrom, TakeWeightCredit, AllowExplicitUnpaidExecutionFrom}; +use staging_xcm_builder::{AllowTopLevelPaidExecutionFrom, TakeWeightCredit, AllowExplicitUnpaidExecutionFrom}; match_types! { pub type ParentOnly: impl Contains = { diff --git a/runtime/quartz/src/lib.rs b/runtime/quartz/src/lib.rs index 37fc4d72be..2943553aae 100644 --- a/runtime/quartz/src/lib.rs +++ b/runtime/quartz/src/lib.rs @@ -27,15 +27,12 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); extern crate alloc; +use ::staging_xcm::latest::NetworkId; use frame_support::parameter_types; - -use sp_version::RuntimeVersion; use sp_runtime::create_runtime_str; - +use sp_version::RuntimeVersion; use up_common::types::*; -use ::xcm::latest::NetworkId; - mod runtime_common; pub mod governance_timings; diff --git a/runtime/quartz/src/xcm_barrier.rs b/runtime/quartz/src/xcm_barrier.rs index a774924e30..654547c2cf 100644 --- a/runtime/quartz/src/xcm_barrier.rs +++ b/runtime/quartz/src/xcm_barrier.rs @@ -15,8 +15,8 @@ // along with Unique Network. If not, see . 
use frame_support::{match_types, traits::Everything}; -use xcm::latest::{Junctions::*, MultiLocation}; -use xcm_builder::{ +use staging_xcm::latest::{Junctions::*, MultiLocation}; +use staging_xcm_builder::{ AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, }; diff --git a/runtime/tests/src/lib.rs b/runtime/tests/src/lib.rs index 30e08e9203..0dea323b78 100644 --- a/runtime/tests/src/lib.rs +++ b/runtime/tests/src/lib.rs @@ -16,27 +16,26 @@ #![allow(clippy::from_over_into)] -use sp_core::{H160, H256, U256}; use frame_support::{ + pallet_prelude::Weight, parameter_types, - traits::{Everything, ConstU32, ConstU64, fungible::Inspect}, + traits::{fungible::Inspect, ConstU32, ConstU64, Everything}, weights::IdentityFee, - pallet_prelude::Weight, }; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - testing::Header, -}; -use pallet_transaction_payment::CurrencyAdapter; use frame_system as system; +use pallet_ethereum::PostLogContent; use pallet_evm::{ - AddressMapping, account::CrossAccountId, EnsureAddressNever, SubstrateBlockHashMapping, - BackwardsAddressMapping, + account::CrossAccountId, AddressMapping, BackwardsAddressMapping, EnsureAddressNever, + SubstrateBlockHashMapping, }; -use pallet_ethereum::PostLogContent; -use codec::{Encode, Decode, MaxEncodedLen}; +use pallet_transaction_payment::CurrencyAdapter; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; - +use sp_core::{H160, H256, U256}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; use up_data_structs::mapping::{CrossTokenAddressMapping, EvmTokenAddressMapping}; #[path = "../../common/dispatch.rs"] diff --git a/runtime/tests/src/tests.rs b/runtime/tests/src/tests.rs index 73862f28aa..9f10c0eb65 100644 --- a/runtime/tests/src/tests.rs +++ b/runtime/tests/src/tests.rs @@ -15,19 +15,22 @@ // along with Unique Network. If not, see . 
// Tests to be written here -use crate::{Test, TestCrossAccountId, CollectionCreationPrice, RuntimeOrigin, Unique, new_test_ext}; -use up_data_structs::{ - COLLECTION_NUMBER_LIMIT, CollectionId, CreateItemData, CreateFungibleData, CreateNftData, - CreateReFungibleData, MAX_DECIMAL_POINTS, COLLECTION_ADMINS_LIMIT, TokenId, - MAX_TOKEN_OWNERSHIP, CreateCollectionData, CollectionMode, AccessMode, CollectionPermissions, - PropertyKeyPermission, PropertyPermission, Property, CollectionPropertiesVec, - CollectionPropertiesPermissionsVec, -}; -use frame_support::{assert_noop, assert_ok, assert_err}; -use sp_std::convert::TryInto; -use pallet_evm::account::CrossAccountId; +use frame_support::{assert_err, assert_noop, assert_ok}; use pallet_common::Error as CommonError; +use pallet_evm::account::CrossAccountId; use pallet_unique::Error as UniqueError; +use sp_std::convert::TryInto; +use up_data_structs::{ + AccessMode, CollectionId, CollectionMode, CollectionPermissions, + CollectionPropertiesPermissionsVec, CollectionPropertiesVec, CreateCollectionData, + CreateFungibleData, CreateItemData, CreateNftData, CreateReFungibleData, Property, + PropertyKeyPermission, PropertyPermission, TokenId, COLLECTION_ADMINS_LIMIT, + COLLECTION_NUMBER_LIMIT, MAX_DECIMAL_POINTS, MAX_TOKEN_OWNERSHIP, +}; + +use crate::{ + new_test_ext, CollectionCreationPrice, RuntimeOrigin, Test, TestCrossAccountId, Unique, +}; fn add_balance(user: u64, value: u64) { const DONOR_USER: u64 = 999; @@ -2617,9 +2620,10 @@ fn collection_sponsoring() { } mod check_token_permissions { - use super::*; use pallet_common::LazyValue; + use super::*; + fn test bool>( i: usize, test_case: &pallet_common::tests::TestCase, diff --git a/runtime/unique/src/lib.rs b/runtime/unique/src/lib.rs index 6ca1f190ce..462f3a155a 100644 --- a/runtime/unique/src/lib.rs +++ b/runtime/unique/src/lib.rs @@ -27,15 +27,12 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); extern crate alloc; +use ::staging_xcm::latest::NetworkId; use frame_support::parameter_types; - -use sp_version::RuntimeVersion; use sp_runtime::create_runtime_str; - +use sp_version::RuntimeVersion; use up_common::types::*; -use ::xcm::latest::NetworkId; - mod runtime_common; pub mod governance_timings; diff --git a/runtime/unique/src/xcm_barrier.rs b/runtime/unique/src/xcm_barrier.rs index a774924e30..654547c2cf 100644 --- a/runtime/unique/src/xcm_barrier.rs +++ b/runtime/unique/src/xcm_barrier.rs @@ -15,8 +15,8 @@ // along with Unique Network. If not, see . 
use frame_support::{match_types, traits::Everything}; -use xcm::latest::{Junctions::*, MultiLocation}; -use xcm_builder::{ +use staging_xcm::latest::{Junctions::*, MultiLocation}; +use staging_xcm_builder::{ AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, }; diff --git a/test-pallets/utils/src/lib.rs b/test-pallets/utils/src/lib.rs index 05459931b8..1f9c3b0ed2 100644 --- a/test-pallets/utils/src/lib.rs +++ b/test-pallets/utils/src/lib.rs @@ -16,18 +16,19 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub use pallet::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; +pub use pallet::*; #[frame_support::pallet(dev_mode)] pub mod pallet { use frame_support::{ + dispatch::{GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, - dispatch::{Dispatchable, GetDispatchInfo, PostDispatchInfo}, - traits::{UnfilteredDispatchable, IsSubType, OriginTrait}, + traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use frame_system::pallet_prelude::*; + use sp_runtime::traits::Dispatchable; use sp_std::vec::Vec; #[pallet::config] From 6ab76d3641c7cf7bb82ff7589fc742c0aaed92e5 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:32 +0200 Subject: [PATCH 096/143] style: retab runtime apis --- runtime/common/runtime_apis.rs | 1381 ++++++++++++++++---------------- 1 file changed, 672 insertions(+), 709 deletions(-) diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index f38234968c..5fed99ba09 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -26,713 +26,676 @@ macro_rules! dispatch_unique_runtime { #[macro_export] macro_rules! impl_common_runtime_apis { - ( - $( - #![custom_apis] - - $($custom_apis:tt)+ - )? - ) => { - use sp_std::prelude::*; - use sp_api::impl_runtime_apis; - use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H256, U256, H160}; - use sp_runtime::{ - Permill, - traits::{Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, DispatchError, - }; - use frame_support::{ - pallet_prelude::Weight, - traits::OnFinalize, - }; - use fp_rpc::TransactionStatus; - use pallet_transaction_payment::{ - FeeDetails, RuntimeDispatchInfo, - }; - use pallet_evm::{ - Runner, account::CrossAccountId as _, - Account as EVMAccount, FeeCalculator, - }; - use runtime_common::{ - sponsoring::{SponsorshipPredict, UniqueSponsorshipPredict}, - dispatch::CollectionDispatch, - config::ethereum::CrossAccountId, - }; - use up_data_structs::*; - - - impl_runtime_apis! { - $($($custom_apis)+)? 
- - impl up_rpc::UniqueApi for Runtime { - fn account_tokens(collection: CollectionId, account: CrossAccountId) -> Result, DispatchError> { - dispatch_unique_runtime!(collection.account_tokens(account)) - } - fn collection_tokens(collection: CollectionId) -> Result, DispatchError> { - dispatch_unique_runtime!(collection.collection_tokens()) - } - fn token_exists(collection: CollectionId, token: TokenId) -> Result { - dispatch_unique_runtime!(collection.token_exists(token)) - } - - fn token_owner(collection: CollectionId, token: TokenId) -> Result, DispatchError> { - dispatch_unique_runtime!(collection.token_owner(token).ok()) - } - - fn token_owners(collection: CollectionId, token: TokenId) -> Result, DispatchError> { - dispatch_unique_runtime!(collection.token_owners(token)) - } - - fn topmost_token_owner(collection: CollectionId, token: TokenId) -> Result, DispatchError> { - let budget = up_data_structs::budget::Value::new(10); - - >::find_topmost_owner(collection, token, &budget) - } - fn token_children(collection: CollectionId, token: TokenId) -> Result, DispatchError> { - Ok(>::token_children_ids(collection, token)) - } - fn collection_properties( - collection: CollectionId, - keys: Option>> - ) -> Result, DispatchError> { - let keys = keys.map( - |keys| Common::bytes_keys_to_property_keys(keys) - ).transpose()?; - - Common::filter_collection_properties(collection, keys) - } - - fn token_properties( - collection: CollectionId, - token_id: TokenId, - keys: Option>> - ) -> Result, DispatchError> { - let keys = keys.map( - |keys| Common::bytes_keys_to_property_keys(keys) - ).transpose()?; - - dispatch_unique_runtime!(collection.token_properties(token_id, keys)) - } - - fn property_permissions( - collection: CollectionId, - keys: Option>> - ) -> Result, DispatchError> { - let keys = keys.map( - |keys| Common::bytes_keys_to_property_keys(keys) - ).transpose()?; - - Common::filter_property_permissions(collection, keys) - } - - fn token_data( - collection: CollectionId, - token_id: TokenId, - keys: Option>> - ) -> Result, DispatchError> { - let token_data = TokenData { - properties: Self::token_properties(collection, token_id, keys)?, - owner: Self::token_owner(collection, token_id)?, - pieces: Self::total_pieces(collection, token_id)?.unwrap_or(0), - }; - - Ok(token_data) - } - - fn total_supply(collection: CollectionId) -> Result { - dispatch_unique_runtime!(collection.total_supply()) - } - fn account_balance(collection: CollectionId, account: CrossAccountId) -> Result { - dispatch_unique_runtime!(collection.account_balance(account)) - } - fn balance(collection: CollectionId, account: CrossAccountId, token: TokenId) -> Result { - dispatch_unique_runtime!(collection.balance(account, token)) - } - fn allowance( - collection: CollectionId, - sender: CrossAccountId, - spender: CrossAccountId, - token: TokenId, - ) -> Result { - dispatch_unique_runtime!(collection.allowance(sender, spender, token)) - } - - fn adminlist(collection: CollectionId) -> Result, DispatchError> { - Ok(>::adminlist(collection)) - } - fn allowlist(collection: CollectionId) -> Result, DispatchError> { - Ok(>::allowlist(collection)) - } - fn allowed(collection: CollectionId, user: CrossAccountId) -> Result { - Ok(>::allowed(collection, user)) - } - fn last_token_id(collection: CollectionId) -> Result { - dispatch_unique_runtime!(collection.last_token_id()) - } - fn collection_by_id(collection: CollectionId) -> Result>, DispatchError> { - Ok(>::rpc_collection(collection)) - } - fn collection_stats() -> Result { - 
Ok(>::collection_stats()) - } - fn next_sponsored(collection: CollectionId, account: CrossAccountId, token: TokenId) -> Result, DispatchError> { - Ok( as SponsorshipPredict>::predict( - collection, - account, - token - )) - } - - fn effective_collection_limits(collection: CollectionId) -> Result, DispatchError> { - Ok(>::effective_collection_limits(collection)) - } - - fn total_pieces(collection: CollectionId, token_id: TokenId) -> Result, DispatchError> { - dispatch_unique_runtime!(collection.total_pieces(token_id)) - } - - fn allowance_for_all(collection: CollectionId, owner: CrossAccountId, operator: CrossAccountId) -> Result { - dispatch_unique_runtime!(collection.allowance_for_all(owner, operator)) - } - } - - impl app_promotion_rpc::AppPromotionApi for Runtime { - #[allow(unused_variables)] - fn total_staked(staker: Option) -> Result { - #[cfg(not(feature = "app-promotion"))] - return unsupported!(); - - #[cfg(feature = "app-promotion")] - return Ok(>::cross_id_total_staked(staker).unwrap_or_default()); - } - - #[allow(unused_variables)] - fn total_staked_per_block(staker: CrossAccountId) -> Result, DispatchError> { - #[cfg(not(feature = "app-promotion"))] - return unsupported!(); - - #[cfg(feature = "app-promotion")] - return Ok(>::cross_id_total_staked_per_block(staker)); - } - - #[allow(unused_variables)] - fn pending_unstake(staker: Option) -> Result { - #[cfg(not(feature = "app-promotion"))] - return unsupported!(); - - #[cfg(feature = "app-promotion")] - return Ok(>::cross_id_pending_unstake(staker)); - } - - #[allow(unused_variables)] - fn pending_unstake_per_block(staker: CrossAccountId) -> Result, DispatchError> { - #[cfg(not(feature = "app-promotion"))] - return unsupported!(); - - #[cfg(feature = "app-promotion")] - return Ok(>::cross_id_pending_unstake_per_block(staker)) - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - - // fn random_seed() -> ::Hash { - // RandomnessCollectiveFlip::random_seed().0 - // } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl fp_rpc::EthereumRuntimeRPCApi for Runtime { - fn chain_id() -> u64 { - ::ChainId::get() - } - - fn account_basic(address: H160) -> EVMAccount { - 
let (account, _) = EVM::account_basic(&address); - account - } - - fn gas_price() -> U256 { - let (price, _) = ::FeeCalculator::min_gas_price(); - price - } - - fn account_code_at(address: H160) -> Vec { - use pallet_evm::OnMethodCall; - ::OnMethodCall::get_code(&address) - .unwrap_or_else(|| pallet_evm::AccountCodes::::get(address)) - } - - fn author() -> H160 { - >::find_author() - } - - fn storage_at(address: H160, index: U256) -> H256 { - let mut tmp = [0u8; 32]; - index.to_big_endian(&mut tmp); - pallet_evm::AccountStorages::::get(address, H256::from_slice(&tmp[..])) - } - - #[allow(clippy::redundant_closure)] - fn call( - from: H160, - to: H160, - data: Vec, - value: U256, - gas_limit: U256, - max_fee_per_gas: Option, - max_priority_fee_per_gas: Option, - nonce: Option, - estimate: bool, - access_list: Option)>>, - ) -> Result { - let config = if estimate { - let mut config = ::config().clone(); - config.estimate = true; - Some(config) - } else { - None - }; - - let is_transactional = false; - let validate = false; - ::Runner::call( - CrossAccountId::from_eth(from), - to, - data, - value, - gas_limit.low_u64(), - max_fee_per_gas, - max_priority_fee_per_gas, - nonce, - access_list.unwrap_or_default(), - is_transactional, - validate, - // TODO we probably want to support external cost recording in non-transactional calls - None, - None, - - config.as_ref().unwrap_or_else(|| ::config()), - ).map_err(|err| err.error.into()) - } - - #[allow(clippy::redundant_closure)] - fn create( - from: H160, - data: Vec, - value: U256, - gas_limit: U256, - max_fee_per_gas: Option, - max_priority_fee_per_gas: Option, - nonce: Option, - estimate: bool, - access_list: Option)>>, - ) -> Result { - let config = if estimate { - let mut config = ::config().clone(); - config.estimate = true; - Some(config) - } else { - None - }; - - let is_transactional = false; - let validate = false; - ::Runner::create( - CrossAccountId::from_eth(from), - data, - value, - gas_limit.low_u64(), - max_fee_per_gas, - max_priority_fee_per_gas, - nonce, - access_list.unwrap_or_default(), - is_transactional, - validate, - // TODO we probably want to support external cost recording in non-transactional calls - None, - None, - - config.as_ref().unwrap_or_else(|| ::config()), - ).map_err(|err| err.error.into()) - } - - fn current_transaction_statuses() -> Option> { - pallet_ethereum::CurrentTransactionStatuses::::get() - } - - fn current_block() -> Option { - pallet_ethereum::CurrentBlock::::get() - } - - fn current_receipts() -> Option> { - pallet_ethereum::CurrentReceipts::::get() - } - - fn current_all() -> ( - Option, - Option>, - Option> - ) { - ( - pallet_ethereum::CurrentBlock::::get(), - pallet_ethereum::CurrentReceipts::::get(), - pallet_ethereum::CurrentTransactionStatuses::::get() - ) - } - - fn extrinsic_filter(xts: Vec<::Extrinsic>) -> Vec { - xts.into_iter().filter_map(|xt| match xt.0.function { - RuntimeCall::Ethereum(pallet_ethereum::Call::transact { transaction }) => Some(transaction), - _ => None - }).collect() - } - - fn elasticity() -> Option { - None - } - - fn gas_limit_multiplier_support() {} - - fn pending_block( - xts: Vec<::Extrinsic>, - ) -> (Option, Option>) { - for ext in xts.into_iter() { - let _ = Executive::apply_extrinsic(ext); - } - - Ethereum::on_finalize(System::block_number() + 1); - - ( - pallet_ethereum::CurrentBlock::::get(), - pallet_ethereum::CurrentTransactionStatuses::::get() - ) - } - } - - impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { - fn convert_transaction(transaction: 
pallet_ethereum::Transaction) -> ::Extrinsic { - UncheckedExtrinsic::new_unsigned( - pallet_ethereum::Call::::transact { transaction }.into(), - ) - } - } - - impl sp_session::SessionKeys for Runtime { - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().to_vec() - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - /* - impl pallet_contracts_rpc_runtime_api::ContractsApi - for Runtime - { - fn call( - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: u64, - input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { - Contracts::bare_call(origin, dest, value, gas_limit, input_data, false) - } - - fn instantiate( - origin: AccountId, - endowment: Balance, - gas_limit: u64, - code: pallet_contracts_primitives::Code, - data: Vec, - salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult - { - Contracts::bare_instantiate(origin, endowment, gas_limit, code, data, salt, true, false) - } - - fn get_storage( - address: AccountId, - key: [u8; 32], - ) -> pallet_contracts_primitives::GetStorageResult { - Contracts::get_storage(address, key) - } - - fn rent_projection( - address: AccountId, - ) -> pallet_contracts_primitives::RentProjectionResult { - Contracts::rent_projection(address) - } - } - */ - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - - let mut list = Vec::::new(); - list_benchmark!(list, extra, pallet_xcm, PolkadotXcm); - - list_benchmark!(list, extra, pallet_evm_migration, EvmMigration); - list_benchmark!(list, extra, pallet_common, Common); - list_benchmark!(list, extra, pallet_unique, Unique); - list_benchmark!(list, extra, pallet_structure, Structure); - list_benchmark!(list, extra, pallet_inflation, Inflation); - list_benchmark!(list, extra, pallet_configuration, Configuration); - - #[cfg(feature = "app-promotion")] - list_benchmark!(list, extra, pallet_app_promotion, AppPromotion); - - list_benchmark!(list, extra, pallet_fungible, Fungible); - list_benchmark!(list, extra, pallet_nonfungible, Nonfungible); - - #[cfg(feature = "refungible")] - list_benchmark!(list, 
extra, pallet_refungible, Refungible); - - #[cfg(feature = "unique-scheduler")] - list_benchmark!(list, extra, pallet_unique_scheduler_v2, Scheduler); - - #[cfg(feature = "collator-selection")] - list_benchmark!(list, extra, pallet_collator_selection, CollatorSelection); - - #[cfg(feature = "collator-selection")] - list_benchmark!(list, extra, pallet_identity, Identity); - - #[cfg(feature = "foreign-assets")] - list_benchmark!(list, extra, pallet_foreign_assets, ForeignAssets); - - list_benchmark!(list, extra, pallet_maintenance, Maintenance); - - // list_benchmark!(list, extra, pallet_evm_coder_substrate, EvmCoderSubstrate); - - let storage_info = AllPalletsWithSystem::storage_info(); - - return (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - - let allowlist: Vec = vec![ - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - - // Evm CurrentLogs - hex_literal::hex!("1da53b775b270400e7e61ed5cbc5a146547f210cec367e9af919603343b9cb56").to_vec().into(), - - // Transactional depth - hex_literal::hex!("3a7472616e73616374696f6e5f6c6576656c3a").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &allowlist); - add_benchmark!(params, batches, pallet_xcm, PolkadotXcm); - - add_benchmark!(params, batches, pallet_evm_migration, EvmMigration); - add_benchmark!(params, batches, pallet_common, Common); - add_benchmark!(params, batches, pallet_unique, Unique); - add_benchmark!(params, batches, pallet_structure, Structure); - add_benchmark!(params, batches, pallet_inflation, Inflation); - add_benchmark!(params, batches, pallet_configuration, Configuration); - - #[cfg(feature = "app-promotion")] - add_benchmark!(params, batches, pallet_app_promotion, AppPromotion); - - add_benchmark!(params, batches, pallet_fungible, Fungible); - add_benchmark!(params, batches, pallet_nonfungible, Nonfungible); - - #[cfg(feature = "refungible")] - add_benchmark!(params, batches, pallet_refungible, Refungible); - - #[cfg(feature = "unique-scheduler")] - add_benchmark!(params, batches, pallet_unique_scheduler_v2, Scheduler); - - #[cfg(feature = "collator-selection")] - add_benchmark!(params, batches, pallet_collator_selection, CollatorSelection); - - #[cfg(feature = "collator-selection")] - add_benchmark!(params, batches, pallet_identity, Identity); - - #[cfg(feature = "foreign-assets")] - add_benchmark!(params, batches, pallet_foreign_assets, ForeignAssets); - - add_benchmark!(params, batches, pallet_maintenance, Maintenance); - - // add_benchmark!(params, batches, pallet_evm_coder_substrate, EvmCoderSubstrate); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } - - impl up_pov_estimate_rpc::PovEstimateApi for Runtime { - #[allow(unused_variables)] - fn pov_estimate(uxt: Vec) -> 
ApplyExtrinsicResult { - #[cfg(feature = "pov-estimate")] - { - use codec::Decode; - - let uxt_decode = <::Extrinsic as Decode>::decode(&mut &*uxt) - .map_err(|_| DispatchError::Other("failed to decode the extrinsic")); - - let uxt = match uxt_decode { - Ok(uxt) => uxt, - Err(err) => return Ok(err.into()), - }; - - Executive::apply_extrinsic(uxt) - } - - #[cfg(not(feature = "pov-estimate"))] - return Ok(unsupported!()); - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - log::info!("try-runtime::on_runtime_upgrade unique-chain."); - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, $crate::config::substrate::RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect - ) -> Weight { - log::info!( - target: "node-runtime", - "try-runtime: executing block {:?} / root checks: {:?} / try-state-select: {:?}", - block.header.hash(), - state_root_check, - select, - ); - - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - } - } + ( + $( + #![custom_apis] + + $($custom_apis:tt)+ + )? + ) => { + use sp_std::prelude::*; + use sp_api::impl_runtime_apis; + use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H256, U256, H160}; + use sp_runtime::{ + Permill, + traits::{Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, DispatchError, + }; + use frame_support::{ + pallet_prelude::Weight, + traits::OnFinalize, + }; + use fp_rpc::TransactionStatus; + use pallet_transaction_payment::{ + FeeDetails, RuntimeDispatchInfo, + }; + use pallet_evm::{ + Runner, account::CrossAccountId as _, + Account as EVMAccount, FeeCalculator, + }; + use runtime_common::{ + sponsoring::{SponsorshipPredict, UniqueSponsorshipPredict}, + dispatch::CollectionDispatch, + config::ethereum::CrossAccountId, + }; + use up_data_structs::*; + use crate::Block; + + impl_runtime_apis! { + $($($custom_apis)+)? 
+ + impl up_rpc::UniqueApi for Runtime { + fn account_tokens(collection: CollectionId, account: CrossAccountId) -> Result, DispatchError> { + dispatch_unique_runtime!(collection.account_tokens(account)) + } + fn collection_tokens(collection: CollectionId) -> Result, DispatchError> { + dispatch_unique_runtime!(collection.collection_tokens()) + } + fn token_exists(collection: CollectionId, token: TokenId) -> Result { + dispatch_unique_runtime!(collection.token_exists(token)) + } + + fn token_owner(collection: CollectionId, token: TokenId) -> Result, DispatchError> { + dispatch_unique_runtime!(collection.token_owner(token).ok()) + } + + fn token_owners(collection: CollectionId, token: TokenId) -> Result, DispatchError> { + dispatch_unique_runtime!(collection.token_owners(token)) + } + + fn topmost_token_owner(collection: CollectionId, token: TokenId) -> Result, DispatchError> { + let budget = up_data_structs::budget::Value::new(10); + + >::find_topmost_owner(collection, token, &budget) + } + fn token_children(collection: CollectionId, token: TokenId) -> Result, DispatchError> { + Ok(>::token_children_ids(collection, token)) + } + fn collection_properties( + collection: CollectionId, + keys: Option>> + ) -> Result, DispatchError> { + let keys = keys.map( + |keys| Common::bytes_keys_to_property_keys(keys) + ).transpose()?; + + Common::filter_collection_properties(collection, keys) + } + + fn token_properties( + collection: CollectionId, + token_id: TokenId, + keys: Option>> + ) -> Result, DispatchError> { + let keys = keys.map( + |keys| Common::bytes_keys_to_property_keys(keys) + ).transpose()?; + + dispatch_unique_runtime!(collection.token_properties(token_id, keys)) + } + + fn property_permissions( + collection: CollectionId, + keys: Option>> + ) -> Result, DispatchError> { + let keys = keys.map( + |keys| Common::bytes_keys_to_property_keys(keys) + ).transpose()?; + + Common::filter_property_permissions(collection, keys) + } + + fn token_data( + collection: CollectionId, + token_id: TokenId, + keys: Option>> + ) -> Result, DispatchError> { + let token_data = TokenData { + properties: Self::token_properties(collection, token_id, keys)?, + owner: Self::token_owner(collection, token_id)?, + pieces: Self::total_pieces(collection, token_id)?.unwrap_or(0), + }; + + Ok(token_data) + } + + fn total_supply(collection: CollectionId) -> Result { + dispatch_unique_runtime!(collection.total_supply()) + } + fn account_balance(collection: CollectionId, account: CrossAccountId) -> Result { + dispatch_unique_runtime!(collection.account_balance(account)) + } + fn balance(collection: CollectionId, account: CrossAccountId, token: TokenId) -> Result { + dispatch_unique_runtime!(collection.balance(account, token)) + } + fn allowance( + collection: CollectionId, + sender: CrossAccountId, + spender: CrossAccountId, + token: TokenId, + ) -> Result { + dispatch_unique_runtime!(collection.allowance(sender, spender, token)) + } + + fn adminlist(collection: CollectionId) -> Result, DispatchError> { + Ok(>::adminlist(collection)) + } + fn allowlist(collection: CollectionId) -> Result, DispatchError> { + Ok(>::allowlist(collection)) + } + fn allowed(collection: CollectionId, user: CrossAccountId) -> Result { + Ok(>::allowed(collection, user)) + } + fn last_token_id(collection: CollectionId) -> Result { + dispatch_unique_runtime!(collection.last_token_id()) + } + fn collection_by_id(collection: CollectionId) -> Result>, DispatchError> { + Ok(>::rpc_collection(collection)) + } + fn collection_stats() -> Result { + 
Ok(>::collection_stats()) + } + fn next_sponsored(collection: CollectionId, account: CrossAccountId, token: TokenId) -> Result, DispatchError> { + Ok( as SponsorshipPredict>::predict( + collection, + account, + token + )) + } + + fn effective_collection_limits(collection: CollectionId) -> Result, DispatchError> { + Ok(>::effective_collection_limits(collection)) + } + + fn total_pieces(collection: CollectionId, token_id: TokenId) -> Result, DispatchError> { + dispatch_unique_runtime!(collection.total_pieces(token_id)) + } + + fn allowance_for_all(collection: CollectionId, owner: CrossAccountId, operator: CrossAccountId) -> Result { + dispatch_unique_runtime!(collection.allowance_for_all(owner, operator)) + } + } + + impl app_promotion_rpc::AppPromotionApi for Runtime { + #[allow(unused_variables)] + fn total_staked(staker: Option) -> Result { + #[cfg(not(feature = "app-promotion"))] + return unsupported!(); + + #[cfg(feature = "app-promotion")] + return Ok(>::cross_id_total_staked(staker).unwrap_or_default()); + } + + #[allow(unused_variables)] + fn total_staked_per_block(staker: CrossAccountId) -> Result, DispatchError> { + #[cfg(not(feature = "app-promotion"))] + return unsupported!(); + + #[cfg(feature = "app-promotion")] + return Ok(>::cross_id_total_staked_per_block(staker)); + } + + #[allow(unused_variables)] + fn pending_unstake(staker: Option) -> Result { + #[cfg(not(feature = "app-promotion"))] + return unsupported!(); + + #[cfg(feature = "app-promotion")] + return Ok(>::cross_id_pending_unstake(staker)); + } + + #[allow(unused_variables)] + fn pending_unstake_per_block(staker: CrossAccountId) -> Result, DispatchError> { + #[cfg(not(feature = "app-promotion"))] + return unsupported!(); + + #[cfg(feature = "app-promotion")] + return Ok(>::cross_id_pending_unstake_per_block(staker)) + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + // fn random_seed() -> ::Hash { + // RandomnessCollectiveFlip::random_seed().0 + // } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl fp_rpc::EthereumRuntimeRPCApi for Runtime { + fn chain_id() -> u64 { + ::ChainId::get() + } + + fn account_basic(address: H160) -> EVMAccount { + 
let (account, _) = EVM::account_basic(&address); + account + } + + fn gas_price() -> U256 { + let (price, _) = ::FeeCalculator::min_gas_price(); + price + } + + fn account_code_at(address: H160) -> Vec { + use pallet_evm::OnMethodCall; + ::OnMethodCall::get_code(&address) + .unwrap_or_else(|| pallet_evm::AccountCodes::::get(address)) + } + + fn author() -> H160 { + >::find_author() + } + + fn storage_at(address: H160, index: U256) -> H256 { + let mut tmp = [0u8; 32]; + index.to_big_endian(&mut tmp); + pallet_evm::AccountStorages::::get(address, H256::from_slice(&tmp[..])) + } + + #[allow(clippy::redundant_closure)] + fn call( + from: H160, + to: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result { + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + let is_transactional = false; + let validate = false; + ::Runner::call( + CrossAccountId::from_eth(from), + to, + data, + value, + gas_limit.low_u64(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + access_list.unwrap_or_default(), + is_transactional, + validate, + // TODO we probably want to support external cost recording in non-transactional calls + None, + None, + + config.as_ref().unwrap_or_else(|| ::config()), + ).map_err(|err| err.error.into()) + } + + #[allow(clippy::redundant_closure)] + fn create( + from: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result { + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + let is_transactional = false; + let validate = false; + ::Runner::create( + CrossAccountId::from_eth(from), + data, + value, + gas_limit.low_u64(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + access_list.unwrap_or_default(), + is_transactional, + validate, + // TODO we probably want to support external cost recording in non-transactional calls + None, + None, + + config.as_ref().unwrap_or_else(|| ::config()), + ).map_err(|err| err.error.into()) + } + + fn current_transaction_statuses() -> Option> { + pallet_ethereum::CurrentTransactionStatuses::::get() + } + + fn current_block() -> Option { + pallet_ethereum::CurrentBlock::::get() + } + + fn current_receipts() -> Option> { + pallet_ethereum::CurrentReceipts::::get() + } + + fn current_all() -> ( + Option, + Option>, + Option> + ) { + ( + pallet_ethereum::CurrentBlock::::get(), + pallet_ethereum::CurrentReceipts::::get(), + pallet_ethereum::CurrentTransactionStatuses::::get() + ) + } + + fn extrinsic_filter(xts: Vec<::Extrinsic>) -> Vec { + xts.into_iter().filter_map(|xt| match xt.0.function { + RuntimeCall::Ethereum(pallet_ethereum::Call::transact { transaction }) => Some(transaction), + _ => None + }).collect() + } + + fn elasticity() -> Option { + None + } + + fn gas_limit_multiplier_support() {} + + fn pending_block( + xts: Vec<::Extrinsic>, + ) -> (Option, Option>) { + for ext in xts.into_iter() { + let _ = Executive::apply_extrinsic(ext); + } + + Ethereum::on_finalize(System::block_number() + 1); + + ( + pallet_ethereum::CurrentBlock::::get(), + pallet_ethereum::CurrentTransactionStatuses::::get() + ) + } + } + + impl sp_session::SessionKeys for Runtime { + fn decode_session_keys( + encoded: Vec, + ) -> Option, 
KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().to_vec() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + + let mut list = Vec::::new(); + list_benchmark!(list, extra, pallet_xcm, PolkadotXcm); + + list_benchmark!(list, extra, pallet_evm_migration, EvmMigration); + list_benchmark!(list, extra, pallet_common, Common); + list_benchmark!(list, extra, pallet_unique, Unique); + list_benchmark!(list, extra, pallet_structure, Structure); + list_benchmark!(list, extra, pallet_inflation, Inflation); + list_benchmark!(list, extra, pallet_configuration, Configuration); + + #[cfg(feature = "app-promotion")] + list_benchmark!(list, extra, pallet_app_promotion, AppPromotion); + + list_benchmark!(list, extra, pallet_fungible, Fungible); + list_benchmark!(list, extra, pallet_nonfungible, Nonfungible); + + #[cfg(feature = "refungible")] + list_benchmark!(list, extra, pallet_refungible, Refungible); + + #[cfg(feature = "unique-scheduler")] + list_benchmark!(list, extra, pallet_unique_scheduler_v2, Scheduler); + + #[cfg(feature = "collator-selection")] + list_benchmark!(list, extra, pallet_collator_selection, CollatorSelection); + + #[cfg(feature = "collator-selection")] + list_benchmark!(list, extra, pallet_identity, Identity); + + #[cfg(feature = "foreign-assets")] + list_benchmark!(list, extra, pallet_foreign_assets, ForeignAssets); + + list_benchmark!(list, extra, pallet_maintenance, Maintenance); + + // list_benchmark!(list, extra, pallet_evm_coder_substrate, EvmCoderSubstrate); + + let storage_info = AllPalletsWithSystem::storage_info(); + + return (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + + let allowlist: Vec = vec![ + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + + // Block Number + 
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + + // Evm CurrentLogs + hex_literal::hex!("1da53b775b270400e7e61ed5cbc5a146547f210cec367e9af919603343b9cb56").to_vec().into(), + + // Transactional depth + hex_literal::hex!("3a7472616e73616374696f6e5f6c6576656c3a").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &allowlist); + add_benchmark!(params, batches, pallet_xcm, PolkadotXcm); + + add_benchmark!(params, batches, pallet_evm_migration, EvmMigration); + add_benchmark!(params, batches, pallet_common, Common); + add_benchmark!(params, batches, pallet_unique, Unique); + add_benchmark!(params, batches, pallet_structure, Structure); + add_benchmark!(params, batches, pallet_inflation, Inflation); + add_benchmark!(params, batches, pallet_configuration, Configuration); + + #[cfg(feature = "app-promotion")] + add_benchmark!(params, batches, pallet_app_promotion, AppPromotion); + + add_benchmark!(params, batches, pallet_fungible, Fungible); + add_benchmark!(params, batches, pallet_nonfungible, Nonfungible); + + #[cfg(feature = "refungible")] + add_benchmark!(params, batches, pallet_refungible, Refungible); + + #[cfg(feature = "unique-scheduler")] + add_benchmark!(params, batches, pallet_unique_scheduler_v2, Scheduler); + + #[cfg(feature = "collator-selection")] + add_benchmark!(params, batches, pallet_collator_selection, CollatorSelection); + + #[cfg(feature = "collator-selection")] + add_benchmark!(params, batches, pallet_identity, Identity); + + #[cfg(feature = "foreign-assets")] + add_benchmark!(params, batches, pallet_foreign_assets, ForeignAssets); + + add_benchmark!(params, batches, pallet_maintenance, Maintenance); + + // add_benchmark!(params, batches, pallet_evm_coder_substrate, EvmCoderSubstrate); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl up_pov_estimate_rpc::PovEstimateApi for Runtime { + #[allow(unused_variables)] + fn pov_estimate(uxt: Vec) -> ApplyExtrinsicResult { + #[cfg(feature = "pov-estimate")] + { + use parity_scale_codec::Decode; + + let uxt_decode = <::Extrinsic as Decode>::decode(&mut &*uxt) + .map_err(|_| DispatchError::Other("failed to decode the extrinsic")); + + let uxt = match uxt_decode { + Ok(uxt) => uxt, + Err(err) => return Ok(err.into()), + }; + + Executive::apply_extrinsic(uxt) + } + + #[cfg(not(feature = "pov-estimate"))] + return Ok(unsupported!()); + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + log::info!("try-runtime::on_runtime_upgrade unique-chain."); + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, $crate::config::substrate::RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect + ) -> Weight { + log::info!( + target: "node-runtime", + "try-runtime: executing block {:?} / root checks: {:?} / try-state-select: {:?}", + block.header.hash(), + 
state_root_check, + select, + ); + + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + /// Should never be used, yet still required because of https://github.com/paritytech/polkadot-sdk/issues/27 + /// Not allowed to panic, because rpc may be called using native runtime, thus causing thread panic. + impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { + fn convert_transaction( + transaction: pallet_ethereum::Transaction + ) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_ethereum::Call::::transact { transaction }.into(), + ) + } + } + } + } } From 71150e7c9b752b801e7dec283831c5f0033f19c9 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:35 +0200 Subject: [PATCH 097/143] feat: use new ethereum sponsoring interface --- pallets/evm-transaction-payment/src/lib.rs | 203 +++++++++++++++++++-- runtime/common/config/ethereum.rs | 5 +- 2 files changed, 187 insertions(+), 21 deletions(-) diff --git a/pallets/evm-transaction-payment/src/lib.rs b/pallets/evm-transaction-payment/src/lib.rs index 6b79a838c4..f664ec7f82 100644 --- a/pallets/evm-transaction-payment/src/lib.rs +++ b/pallets/evm-transaction-payment/src/lib.rs @@ -60,29 +60,62 @@ pub mod pallet { pub struct Pallet(_); } -/// Implements [`fp_evm::TransactionValidityHack`], which provides sponsor address to pallet-evm -pub struct TransactionValidityHack(PhantomData<*const T>); -impl fp_evm::TransactionValidityHack for TransactionValidityHack { - fn who_pays_fee( - origin: H160, - max_fee: U256, - reason: &WithdrawReason, - ) -> Option { - match reason { - WithdrawReason::Call { target, input } => { - let origin_sub = T::CrossAccountId::from_eth(origin); - let call_context = CallContext { - contract_address: *target, - input: input.clone(), - max_fee, - }; - T::EvmSponsorshipHandler::get_sponsor(&origin_sub, &call_context) - } - _ => None, +fn who_pays_fee( + origin: H160, + max_fee: U256, + reason: &WithdrawReason, +) -> Option { + match reason { + WithdrawReason::Call { target, input, .. } => { + let origin_sub = T::CrossAccountId::from_eth(origin); + let call_context = CallContext { + contract_address: *target, + input: input.clone(), + max_fee, + }; + T::EvmSponsorshipHandler::get_sponsor(&origin_sub, &call_context) } + _ => None, } } +fn get_sponsor( + source: H160, + max_fee_per_gas: Option, + gas_limit: u64, + reason: &WithdrawReason, + is_transactional: bool, + is_check: bool, +) -> Option { + let accept_gas_fee = |gas_fee| { + let (base_fee, _) = T::FeeCalculator::min_gas_price(); + base_fee <= gas_fee && gas_fee <= base_fee * 21 / 10 + }; + let (max_fee_per_gas, may_sponsor) = match (max_fee_per_gas, is_transactional) { + (Some(max_fee_per_gas), _) => (max_fee_per_gas, accept_gas_fee(max_fee_per_gas)), + // Gas price check is skipped for non-transactional calls that don't + // define a `max_fee_per_gas` input. 
+ (None, false) => (Default::default(), true), + _ => return None, + }; + + let max_fee = max_fee_per_gas.saturating_mul(gas_limit.into()); + + // #[cfg(feature = "debug-logging")] + // log::trace!(target: "sponsoring", "checking who will pay fee for {:?} {:?}", source, reason); + with_transaction(|| { + let result = may_sponsor + .then(|| who_pays_fee::(source, max_fee, reason)) + .flatten(); + if is_check { + TransactionOutcome::Rollback(Ok::<_, DispatchError>(result)) + } else { + TransactionOutcome::Commit(Ok(result)) + } + }) + .ok() + .flatten() +} /// Implements sponsoring for evm calls performed from pallet-evm (via api.tx.ethereum.transact/api.tx.evm.call) pub struct BridgeSponsorshipHandler(PhantomData); impl SponsorshipHandler for BridgeSponsorshipHandler @@ -127,3 +160,135 @@ where } } } + +/// Set transaction sponsor if available and enough balance. +pub struct TransactionValidity(PhantomData); +impl OnCheckEvmTransaction for TransactionValidity { + fn on_check_evm_transaction( + v: &mut CheckEvmTransaction, + origin: &T::CrossAccountId, + ) -> Result<(), TransactionValidationError> { + let who = &v.who; + let max_fee_per_gas = Some(v.transaction_fee_input()?.0); + let gas_limit = v.transaction.gas_limit.low_u64(); + let reason = if let Some(to) = v.transaction.to { + WithdrawReason::Call { + target: to, + input: v.transaction.input.clone(), + max_fee_per_gas, + gas_limit, + is_transactional: v.config.is_transactional, + is_check: true, + } + } else { + WithdrawReason::Create + }; + let sponsor = get_sponsor::( + *origin.as_eth(), + max_fee_per_gas, + gas_limit, + &reason, + v.config.is_transactional, + true, + ) + .as_ref() + .map(pallet_evm::Pallet::::account_basic_by_id) + .map(|v| v.0); + let fee = max_fee_per_gas + .unwrap() + .saturating_mul(v.transaction.gas_limit); + if let Some(sponsor) = sponsor.as_ref() { + if who.balance < v.transaction.value || sponsor.balance < fee { + return Err(TransactionValidationError::BalanceTooLow.into()); + } + } else { + let total_payment = v.transaction.value.saturating_add(fee); + if who.balance < total_payment { + return Err(TransactionValidationError::BalanceTooLow.into()); + } + } + + let who = sponsor.unwrap_or_else(|| v.who.clone()); + v.who.balance = who.balance; + Ok(()) + } +} + +/// Implements the transaction payment for a pallet implementing the `Currency` +/// trait (eg. the pallet_balances) using an unbalance handler (implementing +/// `OnUnbalanced`). +/// Similar to `CurrencyAdapter` of `pallet_transaction_payment` +pub struct WrappedEVMCurrencyAdapter(sp_std::marker::PhantomData<(C, OU)>); +impl OnChargeEVMTransaction for WrappedEVMCurrencyAdapter +where + T: Config, + C: Currency<::AccountId>, + C::PositiveImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::NegativeImbalance, + >, + C::NegativeImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::PositiveImbalance, + >, + OU: OnUnbalanced>, + U256: UniqueSaturatedInto<::AccountId>>::Balance>, +{ + // Kept type as Option to satisfy bound of Default + type LiquidityInfo = (Option>, Option); + + fn withdraw_fee( + who: &T::CrossAccountId, + reason: WithdrawReason, + fee: U256, + ) -> Result> { + let sponsor = match reason { + WithdrawReason::Call { + max_fee_per_gas, + gas_limit, + is_transactional, + is_check, + .. 
+ } => get_sponsor::( + *who.as_eth(), + max_fee_per_gas, + gas_limit, + &reason, + is_transactional, + is_check, + ), + _ => None, + }; + + let who = sponsor.as_ref().unwrap_or(who); + as OnChargeEVMTransaction>::withdraw_fee( + who, reason, fee, + ) + .map(|li| (li, sponsor)) + } + + fn correct_and_deposit_fee( + who: &T::CrossAccountId, + corrected_fee: U256, + base_fee: U256, + already_withdrawn: Self::LiquidityInfo, + ) -> Self::LiquidityInfo { + let (already_withdrawn, sponsor) = already_withdrawn; + let who = sponsor.as_ref().unwrap_or(who); + ( + as OnChargeEVMTransaction>::correct_and_deposit_fee( + who, + corrected_fee, + base_fee, + already_withdrawn, + ), + None + ) + } + + fn pay_priority_fee(tip: Self::LiquidityInfo) { + as OnChargeEVMTransaction>::pay_priority_fee( + tip.0, + ) + } +} diff --git a/runtime/common/config/ethereum.rs b/runtime/common/config/ethereum.rs index bfe3da6d82..6c8e5d58d0 100644 --- a/runtime/common/config/ethereum.rs +++ b/runtime/common/config/ethereum.rs @@ -89,12 +89,13 @@ impl pallet_evm::Config for Runtime { type OnCreate = pallet_evm_contract_helpers::HelpersOnCreate; type ChainId = ChainId; type Runner = pallet_evm::runner::stack::Runner; - type OnChargeTransaction = pallet_evm::EVMCurrencyAdapter; - type TransactionValidityHack = pallet_evm_transaction_payment::TransactionValidityHack; + type OnChargeTransaction = + pallet_evm_transaction_payment::WrappedEVMCurrencyAdapter; type FindAuthor = EthereumFindAuthor; type Timestamp = crate::Timestamp; type WeightInfo = pallet_evm::weights::SubstrateWeight; type GasLimitPovSizeRatio = ProofSizePerGas; + type OnCheckEvmTransaction = pallet_evm_transaction_payment::TransactionValidity; } impl pallet_evm_migration::Config for Runtime { From 4b36925b9a9a415d95533c0345c7ad3b8dd3bf37 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:07:38 +0200 Subject: [PATCH 098/143] build: bump node version --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a825878fec..b1922dbcaa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6442,7 +6442,7 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opal-runtime" -version = "0.9.43" +version = "1.1.0" dependencies = [ "app-promotion-rpc", "cumulus-pallet-aura-ext", @@ -10050,7 +10050,7 @@ dependencies = [ [[package]] name = "quartz-runtime" -version = "0.9.43" +version = "1.1.0" dependencies = [ "app-promotion-rpc", "cumulus-pallet-aura-ext", @@ -14719,7 +14719,7 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "unique-node" -version = "0.9.43" +version = "1.1.0" dependencies = [ "app-promotion-rpc", "clap", @@ -14802,7 +14802,7 @@ dependencies = [ [[package]] name = "unique-runtime" -version = "0.9.43" +version = "1.1.0" dependencies = [ "app-promotion-rpc", "cumulus-pallet-aura-ext", @@ -14949,7 +14949,7 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "up-common" -version = "0.9.43" +version = "1.1.0" dependencies = [ "cumulus-primitives-core", "fp-rpc", diff --git a/Cargo.toml b/Cargo.toml index deea71cf01..25f8c29d66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ members = [ 'runtime/tests', 'runtime/unique', ] -package.version = "0.9.43" +package.version = "1.1.0" resolver = "2" [profile.release] From 2eca527f0699d999795ab8f6eeeddda6fa5a321c Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 
Oct 2023 00:09:16 +0200 Subject: [PATCH 099/143] fixup(rpc): move to node --- node/cli/src/rpc.rs | 279 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 279 insertions(+) create mode 100644 node/cli/src/rpc.rs diff --git a/node/cli/src/rpc.rs b/node/cli/src/rpc.rs new file mode 100644 index 0000000000..8c33ade0b5 --- /dev/null +++ b/node/cli/src/rpc.rs @@ -0,0 +1,279 @@ +// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. +// This file is part of Unique Network. + +// Unique Network is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Unique Network is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Unique Network. If not, see . + +use std::sync::Arc; + +use fc_mapping_sync::{EthereumBlockNotification, EthereumBlockNotificationSinks}; +use fc_rpc::{ + pending::AuraConsensusDataProvider, EthBlockDataCacheTask, EthConfig, OverrideHandle, +}; +use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; +use fp_rpc::NoTransactionConverter; +use jsonrpsee::RpcModule; +use sc_client_api::{ + backend::{AuxStore, StorageProvider}, + client::BlockchainEvents, + UsageProvider, +}; +use sc_network::NetworkService; +use sc_network_sync::SyncingService; +use sc_rpc::SubscriptionTaskExecutor; +pub use sc_rpc_api::DenyUnsafe; +use sc_service::TransactionPool; +use sc_transaction_pool::{ChainApi, Pool}; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::traits::BlakeTwo256; +use up_common::types::opaque::*; + +use crate::service::RuntimeApiDep; + +#[cfg(feature = "pov-estimate")] +type FullBackend = sc_service::TFullBackend; + +/// Full client dependencies. +pub struct FullDeps { + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// The SelectChain Strategy + pub select_chain: SC, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, + + /// Runtime identification (read from the chain spec) + pub runtime_id: RuntimeId, + /// Executor params for PoV estimating + #[cfg(feature = "pov-estimate")] + pub exec_params: uc_rpc::pov_estimate::ExecutorParams, + /// Substrate Backend. + #[cfg(feature = "pov-estimate")] + pub backend: Arc, +} + +/// Instantiate all Full RPC extensions. +pub fn create_full( + io: &mut RpcModule<()>, + deps: FullDeps, +) -> Result<(), Box> +where + C: ProvideRuntimeApi + StorageProvider + AuxStore, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C: BlockchainEvents, + C::Api: RuntimeApiDep, + B: sc_client_api::Backend + Send + Sync + 'static, + P: TransactionPool + 'static, + R: RuntimeInstance + Send + Sync + 'static, + ::CrossAccountId: serde::Serialize, + C: sp_api::CallApiAt< + sp_runtime::generic::Block< + sp_runtime::generic::Header, + sp_runtime::OpaqueExtrinsic, + >, + >, + for<'de> ::CrossAccountId: serde::Deserialize<'de>, +{ + // use pallet_contracts_rpc::{Contracts, ContractsApi}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; + #[cfg(feature = "pov-estimate")] + use uc_rpc::pov_estimate::{PovEstimate, PovEstimateApiServer}; + use uc_rpc::{AppPromotion, AppPromotionApiServer, Unique, UniqueApiServer}; + + let FullDeps { + client, + pool, + select_chain: _, + deny_unsafe, + + runtime_id: _, + + #[cfg(feature = "pov-estimate")] + exec_params, + + #[cfg(feature = "pov-estimate")] + backend, + } = deps; + + io.merge(System::new(Arc::clone(&client), Arc::clone(&pool), deny_unsafe).into_rpc())?; + io.merge(TransactionPayment::new(Arc::clone(&client)).into_rpc())?; + + io.merge(Unique::new(client.clone()).into_rpc())?; + + io.merge(AppPromotion::new(client).into_rpc())?; + + #[cfg(feature = "pov-estimate")] + io.merge( + PovEstimate::new( + client.clone(), + backend, + deny_unsafe, + exec_params, + runtime_id, + ) + .into_rpc(), + )?; + + Ok(()) +} + +pub struct EthDeps { + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// Graph pool instance. + pub graph: Arc>, + /// Syncing service + pub sync: Arc>, + /// The Node authority flag + pub is_authority: bool, + /// Network service + pub network: Arc>, + + /// Ethereum Backend. + pub eth_backend: Arc + Send + Sync>, + /// Maximum number of logs in a query. + pub max_past_logs: u32, + /// Maximum fee history cache size. + pub fee_history_limit: u64, + /// Fee history cache. + pub fee_history_cache: FeeHistoryCache, + pub eth_block_data_cache: Arc>, + /// EthFilterApi pool. + pub eth_filter_pool: Option, + pub eth_pubsub_notification_sinks: + Arc>>, + /// Whether to enable eth dev signer + pub enable_dev_signer: bool, + + pub overrides: Arc>, + pub pending_create_inherent_data_providers: CIDP, +} + +pub fn create_eth( + io: &mut RpcModule<()>, + deps: EthDeps, + subscription_task_executor: SubscriptionTaskExecutor, +) -> Result<(), Box> +where + C: ProvideRuntimeApi + StorageProvider + AuxStore, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C: BlockchainEvents, + C: UsageProvider, + C::Api: RuntimeApiDep, + P: TransactionPool + 'static, + CA: ChainApi + 'static, + B: sc_client_api::Backend + Send + Sync + 'static, + C: sp_api::CallApiAt, + CIDP: CreateInherentDataProviders + Send + 'static, + EC: EthConfig, + R: RuntimeInstance, +{ + use fc_rpc::{ + Eth, EthApiServer, EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, + EthPubSubApiServer, EthSigner, Net, NetApiServer, Web3, Web3ApiServer, + }; + + let EthDeps { + client, + pool, + graph, + eth_backend, + max_past_logs, + fee_history_limit, + fee_history_cache, + eth_block_data_cache, + eth_filter_pool, + eth_pubsub_notification_sinks, + enable_dev_signer, + sync, + is_authority, + network, + overrides, + pending_create_inherent_data_providers, + } = deps; + + let mut signers = Vec::new(); + if enable_dev_signer { + signers.push(Box::new(EthDevSigner::new()) as Box); + } + let execute_gas_limit_multiplier = 10; + io.merge( + Eth::<_, _, _, _, _, _, _, EC>::new( + client.clone(), + pool.clone(), + graph.clone(), + // We have no runtimes old enough to only accept converted transactions + None::, + sync.clone(), + signers, + overrides.clone(), + eth_backend.clone(), + is_authority, + eth_block_data_cache.clone(), + fee_history_cache, + fee_history_limit, + execute_gas_limit_multiplier, + None, + pending_create_inherent_data_providers, + Some(Box::new(AuraConsensusDataProvider::new(client.clone()))), + ) + .into_rpc(), + )?; + + if let Some(filter_pool) = eth_filter_pool { + io.merge( + EthFilter::new( + client.clone(), + eth_backend, + graph.clone(), + filter_pool, + 500_usize, // max stored filters + max_past_logs, + eth_block_data_cache, + ) + .into_rpc(), + )?; + } + io.merge( + Net::new( + client.clone(), + network, + // Whether to format the `peer_count` response as Hex (default) or not. 
+ true, + ) + .into_rpc(), + )?; + io.merge(Web3::new(client.clone()).into_rpc())?; + io.merge( + EthPubSub::new( + pool, + client, + sync, + subscription_task_executor, + overrides, + eth_pubsub_notification_sinks, + ) + .into_rpc(), + )?; + + Ok(()) +} From e48f2d1c76b505cd5ab2839cb0b95d645ccd2c86 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 3 Oct 2023 00:51:54 +0200 Subject: [PATCH 100/143] fixup(imports): post-rebase --- Cargo.toml | 2 +- node/cli/src/command.rs | 13 ++++--------- node/cli/src/service.rs | 4 ++-- pallets/collator-selection/src/benchmarking.rs | 6 +++--- pallets/common/src/lib.rs | 2 +- pallets/configuration/src/benchmarking.rs | 2 +- pallets/inflation/src/benchmarking.rs | 10 +++++----- pallets/nonfungible/src/benchmarking.rs | 2 +- pallets/nonfungible/src/lib.rs | 12 ++++++------ pallets/refungible/src/benchmarking.rs | 2 +- pallets/refungible/src/lib.rs | 2 +- runtime/opal/src/xcm_barrier.rs | 6 ++++-- runtime/quartz/src/xcm_barrier.rs | 4 ++-- runtime/unique/src/xcm_barrier.rs | 4 ++-- 14 files changed, 34 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 25f8c29d66..3bc5bdd180 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,8 +26,8 @@ lto = true opt-level = 3 [profile.integration-tests] -inherits = "release" debug-assertions = true +inherits = "release" [workspace.dependencies] # Unique diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 22a34fe61b..c117ad897d 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -32,20 +32,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::time::Duration; - -use codec::Encode; -use cumulus_client_cli::generate_genesis_block; use cumulus_primitives_core::ParaId; -use log::{debug, info}; +use log::info; use sc_cli::{ ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, - NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli, + NetworkParams, Result, SharedParams, SubstrateCli, }; use sc_service::config::{BasePath, PrometheusConfig}; -use sp_core::hexdisplay::HexDisplay; -use sp_runtime::traits::{AccountIdConversion, Block as BlockT}; -use up_common::types::opaque::{Block, RuntimeId}; +use sp_runtime::traits::AccountIdConversion; +use up_common::types::opaque::RuntimeId; #[cfg(feature = "runtime-benchmarks")] use crate::chain_spec::default_runtime; diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 7c6c2a9620..aa73cc7ec4 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -824,8 +824,8 @@ where { use fc_consensus::FrontierBlockImport; use sc_consensus_manual_seal::{ - run_manual_seal, run_delayed_finalize, EngineCommand, ManualSealParams, - DelayedFinalizeParams, + run_delayed_finalize, run_manual_seal, DelayedFinalizeParams, EngineCommand, + ManualSealParams, }; let sc_service::PartialComponents { diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs index 37838ee4d2..43e38c5f0e 100644 --- a/pallets/collator-selection/src/benchmarking.rs +++ b/pallets/collator-selection/src/benchmarking.rs @@ -35,15 +35,15 @@ use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_support::{ assert_ok, - parity_scale_codec::Decode, traits::{ fungible::{Inspect, Mutate}, EnsureOrigin, Get, }, }; -use frame_system::{EventRecord, RawOrigin}; +use frame_system::{pallet_prelude::*, EventRecord, RawOrigin}; use 
pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; +use parity_scale_codec::Decode; use sp_std::prelude::*; use super::*; @@ -338,7 +338,7 @@ benchmarks! { register_candidates::(c); let new_block: BlockNumberFor= 1800u32.into(); - let zero_block: T::BlockNumber = 0u32.into(); + let zero_block: BlockNumberFor = 0u32.into(); let candidates = >::get(); let non_removals = c.saturating_sub(r); diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index d4d29073c2..0b56768590 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -54,9 +54,9 @@ extern crate alloc; use core::{ + marker::PhantomData, ops::{Deref, DerefMut}, slice::from_ref, - marker::PhantomData, }; use evm_coder::ToLog; diff --git a/pallets/configuration/src/benchmarking.rs b/pallets/configuration/src/benchmarking.rs index 4943feb2a4..69beb609a0 100644 --- a/pallets/configuration/src/benchmarking.rs +++ b/pallets/configuration/src/benchmarking.rs @@ -18,7 +18,7 @@ use frame_benchmarking::benchmarks; use frame_support::assert_ok; -use frame_system::{EventRecord, RawOrigin}; +use frame_system::{pallet_prelude::*, EventRecord, RawOrigin}; use super::*; diff --git a/pallets/inflation/src/benchmarking.rs b/pallets/inflation/src/benchmarking.rs index 430a9c3606..32ef928e06 100644 --- a/pallets/inflation/src/benchmarking.rs +++ b/pallets/inflation/src/benchmarking.rs @@ -17,7 +17,7 @@ #![cfg(feature = "runtime-benchmarks")] use frame_benchmarking::benchmarks; -use frame_support::traits::OnInitialize; +use frame_support::{pallet_prelude::*, traits::Hooks}; use super::*; use crate::Pallet as Inflation; @@ -25,9 +25,9 @@ use crate::Pallet as Inflation; benchmarks! { on_initialize { - let block1: T::BlockNumber = T::BlockNumber::from(1u32); - let block2: T::BlockNumber = T::BlockNumber::from(2u32); - Inflation::::on_initialize(block1); // Create Treasury account - }: { Inflation::::on_initialize(block2); } // Benchmark deposit_into_existing path + let block1: BlockNumberFor = 1u32.into(); + let block2: BlockNumberFor = 2u32.into(); + as Hooks>::on_initialize(block1); // Create Treasury account + }: { as Hooks>::on_initialize(block2); } // Benchmark deposit_into_existing path } diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 63886f5a39..8584aab31d 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -18,7 +18,7 @@ use frame_benchmarking::{account, benchmarks}; use pallet_common::{ bench_init, benchmarking::{ - create_collection_raw, property_key, property_value, load_is_admin_and_property_permissions, + create_collection_raw, load_is_admin_and_property_permissions, property_key, property_value, }, CommonCollectionOperations, }; diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index bfbe4c6ad9..948e31061b 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -103,9 +103,9 @@ use frame_support::{ }; pub use pallet::*; use pallet_common::{ - Error as CommonError, Pallet as PalletCommon, Event as CommonEvent, CollectionHandle, - eth::collection_id_to_address, SelfWeightOf as PalletCommonWeightOf, - weights::WeightInfo as CommonWeightInfo, helpers::add_weight_to_post_info, + eth::collection_id_to_address, helpers::add_weight_to_post_info, + weights::WeightInfo as CommonWeightInfo, CollectionHandle, Error as CommonError, + Event as CommonEvent, Pallet as PalletCommon, SelfWeightOf as PalletCommonWeightOf, }; use 
pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; @@ -116,9 +116,9 @@ use sp_core::{Get, H160}; use sp_runtime::{ArithmeticError, DispatchError, DispatchResult, TransactionOutcome}; use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; use up_data_structs::{ - AccessMode, CollectionId, CustomDataLimit, TokenId, CreateCollectionData, CreateNftExData, - mapping::TokenAddressMapping, budget::Budget, Property, PropertyKey, PropertyValue, - PropertyKeyPermission, PropertyScope, TokenChild, AuxPropertyValue, PropertiesPermissionMap, + budget::Budget, mapping::TokenAddressMapping, AccessMode, AuxPropertyValue, CollectionId, + CreateCollectionData, CreateNftExData, CustomDataLimit, PropertiesPermissionMap, Property, + PropertyKey, PropertyKeyPermission, PropertyScope, PropertyValue, TokenChild, TokenId, TokenProperties as TokenPropertiesT, }; use weights::WeightInfo; diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index a76ccf8c5f..469b582742 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -20,7 +20,7 @@ use frame_benchmarking::{account, benchmarks}; use pallet_common::{ bench_init, benchmarking::{ - create_collection_raw, property_key, property_value, load_is_admin_and_property_permissions, + create_collection_raw, load_is_admin_and_property_permissions, property_key, property_value, }, }; use sp_std::prelude::*; diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 766859a588..ea03543ab8 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -106,7 +106,7 @@ use up_data_structs::{ budget::Budget, mapping::TokenAddressMapping, AccessMode, CollectionId, CreateCollectionData, CreateRefungibleExMultipleOwners, PropertiesPermissionMap, Property, PropertyKey, PropertyKeyPermission, PropertyScope, PropertyValue, TokenId, TokenOwnerError, - TokenProperties as TokenPropertiesT, TrySetProperty, MAX_REFUNGIBLE_PIECES, + TokenProperties as TokenPropertiesT, MAX_REFUNGIBLE_PIECES, }; use crate::{erc::ERC721Events, erc_token::ERC20Events}; diff --git a/runtime/opal/src/xcm_barrier.rs b/runtime/opal/src/xcm_barrier.rs index bd0d90b496..d16b4a2825 100644 --- a/runtime/opal/src/xcm_barrier.rs +++ b/runtime/opal/src/xcm_barrier.rs @@ -15,8 +15,10 @@ // along with Unique Network. If not, see . use frame_support::{match_types, traits::Everything}; -use xcm::latest::{Junctions::*, MultiLocation}; -use staging_xcm_builder::{AllowTopLevelPaidExecutionFrom, TakeWeightCredit, AllowExplicitUnpaidExecutionFrom}; +use staging_xcm::latest::{Junctions::*, MultiLocation}; +use staging_xcm_builder::{ + AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, TakeWeightCredit, +}; match_types! 
{ pub type ParentOnly: impl Contains = { diff --git a/runtime/quartz/src/xcm_barrier.rs b/runtime/quartz/src/xcm_barrier.rs index 654547c2cf..15fa44dec6 100644 --- a/runtime/quartz/src/xcm_barrier.rs +++ b/runtime/quartz/src/xcm_barrier.rs @@ -17,8 +17,8 @@ use frame_support::{match_types, traits::Everything}; use staging_xcm::latest::{Junctions::*, MultiLocation}; use staging_xcm_builder::{ - AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, + AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, TakeWeightCredit, }; use crate::PolkadotXcm; diff --git a/runtime/unique/src/xcm_barrier.rs b/runtime/unique/src/xcm_barrier.rs index 654547c2cf..15fa44dec6 100644 --- a/runtime/unique/src/xcm_barrier.rs +++ b/runtime/unique/src/xcm_barrier.rs @@ -17,8 +17,8 @@ use frame_support::{match_types, traits::Everything}; use staging_xcm::latest::{Junctions::*, MultiLocation}; use staging_xcm_builder::{ - AllowKnownQueryResponses, AllowSubscriptionsFrom, TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, AllowExplicitUnpaidExecutionFrom, + AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, TakeWeightCredit, }; use crate::PolkadotXcm; From fd6af7529aa79c3f4c1a5bc84b682a320c006ad2 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 3 Oct 2023 12:55:46 +0200 Subject: [PATCH 101/143] fix: force_value in LazyValue --- pallets/common/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index 0b56768590..23d605497e 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -888,18 +888,18 @@ impl T> LazyValue { /// Get the value. If it is called the first time, the value will be initialized. pub fn value(&mut self) -> &T { - self.compute_value_if_not_already(); + self.force_value(); self.value.as_ref().unwrap() } /// Get the value. If it is called the first time, the value will be initialized. pub fn value_mut(&mut self) -> &mut T { - self.compute_value_if_not_already(); + self.force_value(); self.value.as_mut().unwrap() } fn into_inner(mut self) -> T { - self.compute_value_if_not_already(); + self.force_value(); self.value.unwrap() } @@ -908,7 +908,7 @@ impl T> LazyValue { self.value.is_some() } - fn compute_value_if_not_already(&mut self) { + fn force_value(&mut self) { if self.value.is_none() { self.value = Some(self.f.take().unwrap()()) } From 0e3444d4ba8a9556f7c48df70c5ad04d46e1572e Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 3 Oct 2023 13:08:30 +0200 Subject: [PATCH 102/143] fix: clippy warn --- pallets/refungible/src/common.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index cfb3af2ffb..74aa322bb9 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -497,15 +497,13 @@ impl CommonCollectionOperations for RefungibleHandle { return Ok(false); } - let is_bundle_owner = >::check_indirectly_owned( + >::check_indirectly_owned( maybe_owner.clone(), self.id, token, None, nesting_budget, - )?; - - Ok(is_bundle_owner) + ) } /// Returns 10 token in no particular order. 
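The clippy fix above removes a needless round-trip through `?` and `Ok(..)`: when the inner call already returns the exact `Result` the function wants, it can be returned directly. A minimal, self-contained illustration of the pattern (an editorial sketch, not code from this repository):

```rust
use std::num::ParseIntError;

// Flagged by clippy: unwrap with `?` only to re-wrap the value in `Ok(..)`.
fn parse_redundant(s: &str) -> Result<i64, ParseIntError> {
    let n = s.parse::<i64>()?;
    Ok(n)
}

// Equivalent behaviour, returning the inner `Result` as-is — the shape the patch
// gives to the `check_indirectly_owned` call.
fn parse_direct(s: &str) -> Result<i64, ParseIntError> {
    s.parse::<i64>()
}

fn main() {
    assert_eq!(parse_redundant("42"), parse_direct("42"));
    assert!(parse_direct("not a number").is_err());
}
```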
From f5ed3dae1fee2103d1ae8a28441ea66dfdb05ca9 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 3 Oct 2023 13:08:45 +0200 Subject: [PATCH 103/143] fix: remove unused import --- runtime/common/runtime_apis.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 5fed99ba09..112f680216 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -60,7 +60,6 @@ macro_rules! impl_common_runtime_apis { config::ethereum::CrossAccountId, }; use up_data_structs::*; - use crate::Block; impl_runtime_apis! { $($($custom_apis)+)? From f9563faaa852231ac4ec796ce05c3267d60b0f60 Mon Sep 17 00:00:00 2001 From: PraetorP Date: Tue, 3 Oct 2023 10:43:50 +0000 Subject: [PATCH 104/143] fix(xcm test): hooks for `Astar`\`Shiden` --- tests/src/util/playgrounds/unique.xcm.ts | 4 ++++ tests/src/xcm/lowLevelXcmQuartz.test.ts | 2 +- tests/src/xcm/lowLevelXcmUnique.test.ts | 2 +- tests/src/xcm/xcmQuartz.test.ts | 6 +++--- tests/src/xcm/xcmUnique.test.ts | 6 +++--- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/src/util/playgrounds/unique.xcm.ts b/tests/src/util/playgrounds/unique.xcm.ts index a980c2c99c..c9704ac7f2 100644 --- a/tests/src/util/playgrounds/unique.xcm.ts +++ b/tests/src/util/playgrounds/unique.xcm.ts @@ -244,6 +244,10 @@ export class AssetsGroup extends HelperGroup { await this.helper.executeExtrinsic(signer, 'api.tx.assets.create', [assetId, admin, minimalBalance], true); } + async forceCreate(signer: TSigner, assetId: number | bigint, admin: string, minimalBalance: bigint, isSufficient = true) { + await this.helper.executeExtrinsic(signer, 'api.tx.assets.forceCreate', [assetId, admin, isSufficient, minimalBalance], true); + } + async setMetadata(signer: TSigner, assetId: number | bigint, name: string, symbol: string, decimals: number) { await this.helper.executeExtrinsic(signer, 'api.tx.assets.setMetadata', [assetId, name, symbol, decimals], true); } diff --git a/tests/src/xcm/lowLevelXcmQuartz.test.ts b/tests/src/xcm/lowLevelXcmQuartz.test.ts index 6f06ed760d..fd3549d353 100644 --- a/tests/src/xcm/lowLevelXcmQuartz.test.ts +++ b/tests/src/xcm/lowLevelXcmQuartz.test.ts @@ -243,7 +243,7 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Shiden', () => { await usingShidenPlaygrounds(shidenUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { console.log('1. Create foreign asset and metadata'); - await helper.assets.create( + await helper.getSudo().assets.forceCreate( alice, QTZ_ASSET_ID_ON_SHIDEN, alice.address, diff --git a/tests/src/xcm/lowLevelXcmUnique.test.ts b/tests/src/xcm/lowLevelXcmUnique.test.ts index f66432dab1..8bab9e31de 100644 --- a/tests/src/xcm/lowLevelXcmUnique.test.ts +++ b/tests/src/xcm/lowLevelXcmUnique.test.ts @@ -309,7 +309,7 @@ describeXCM('[XCMLL] Integration test: Exchanging tokens with Astar', () => { await usingAstarPlaygrounds(astarUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { console.log('1. 
Create foreign asset and metadata'); - await helper.assets.create( + await helper.getSudo().assets.forceCreate( alice, UNQ_ASSET_ID_ON_ASTAR, alice.address, diff --git a/tests/src/xcm/xcmQuartz.test.ts b/tests/src/xcm/xcmQuartz.test.ts index 92e4310e89..49b9f84bb2 100644 --- a/tests/src/xcm/xcmQuartz.test.ts +++ b/tests/src/xcm/xcmQuartz.test.ts @@ -1290,11 +1290,11 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { const shidenInitialBalance = 1n * (10n ** SHIDEN_DECIMALS); // 1 SHD, existential deposit required to actually create the account on Shiden const unitsPerSecond = 500_451_000_000_000_000_000n; // The value is taken from the live Shiden const qtzToShidenTransferred = 10n * (10n ** QTZ_DECIMALS); // 10 QTZ - const qtzToShidenArrived = 9_999_999_999_088_000_000n; // 9.999 ... QTZ, Shiden takes a commision in foreign tokens + const qtzToShidenArrived = 7_998_196_000_000_000_000n; // 7.99 ... QTZ, Shiden takes a commision in foreign tokens // Shiden -> Quartz const qtzFromShidenTransfered = 5n * (10n ** QTZ_DECIMALS); // 5 QTZ - const qtzOnShidenLeft = qtzToShidenArrived - qtzFromShidenTransfered; // 4.999_999_999_088_000_000n QTZ + const qtzOnShidenLeft = qtzToShidenArrived - qtzFromShidenTransfered; // 2.99 ... QTZ let balanceAfterQuartzToShidenXCM: bigint; @@ -1311,7 +1311,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Shiden', () => { await usingShidenPlaygrounds(shidenUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [QTZ_ASSET_ID_ON_SHIDEN])).toJSON()) { console.log('1. Create foreign asset and metadata'); - await helper.assets.create( + await helper.getSudo().assets.forceCreate( alice, QTZ_ASSET_ID_ON_SHIDEN, alice.address, diff --git a/tests/src/xcm/xcmUnique.test.ts b/tests/src/xcm/xcmUnique.test.ts index b720914581..76f7693bb8 100644 --- a/tests/src/xcm/xcmUnique.test.ts +++ b/tests/src/xcm/xcmUnique.test.ts @@ -1518,11 +1518,11 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { const astarInitialBalance = 1n * (10n ** ASTAR_DECIMALS); // 1 ASTR, existential deposit required to actually create the account on Astar. const unitsPerSecond = 9_451_000_000_000_000_000n; // The value is taken from the live Astar const unqToAstarTransferred = 10n * (10n ** UNQ_DECIMALS); // 10 UNQ - const unqToAstarArrived = 9_999_999_999_088_000_000n; // 9.999 ... UNQ, Astar takes a commision in foreign tokens + const unqToAstarArrived = 9_962_196_000_000_000_000n; // 9.962 ... UNQ, Astar takes a commision in foreign tokens // Astar -> Unique const unqFromAstarTransfered = 5n * (10n ** UNQ_DECIMALS); // 5 UNQ - const unqOnAstarLeft = unqToAstarArrived - unqFromAstarTransfered; // 4.999_999_999_088_000_000n UNQ + const unqOnAstarLeft = unqToAstarArrived - unqFromAstarTransfered; // 4.962_219_600_000_000_000n UNQ let balanceAfterUniqueToAstarXCM: bigint; @@ -1539,7 +1539,7 @@ describeXCM('[XCM] Integration test: Exchanging tokens with Astar', () => { await usingAstarPlaygrounds(astarUrl, async (helper) => { if(!(await helper.callRpc('api.query.assets.asset', [UNQ_ASSET_ID_ON_ASTAR])).toJSON()) { console.log('1. 
Create foreign asset and metadata'); - await helper.assets.create( + await helper.getSudo().assets.forceCreate( alice, UNQ_ASSET_ID_ON_ASTAR, alice.address, From 13e91d34e4ba9d158dc2c26f3e94489b432b065a Mon Sep 17 00:00:00 2001 From: PraetorP Date: Wed, 4 Oct 2023 16:11:10 +0700 Subject: [PATCH 105/143] fix(`evm-transaction-payment`): clippy --- pallets/evm-transaction-payment/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/evm-transaction-payment/src/lib.rs b/pallets/evm-transaction-payment/src/lib.rs index f664ec7f82..62986c6890 100644 --- a/pallets/evm-transaction-payment/src/lib.rs +++ b/pallets/evm-transaction-payment/src/lib.rs @@ -199,12 +199,12 @@ impl OnCheckEvmTransaction for TransactionValidity { .saturating_mul(v.transaction.gas_limit); if let Some(sponsor) = sponsor.as_ref() { if who.balance < v.transaction.value || sponsor.balance < fee { - return Err(TransactionValidationError::BalanceTooLow.into()); + return Err(TransactionValidationError::BalanceTooLow); } } else { let total_payment = v.transaction.value.saturating_add(fee); if who.balance < total_payment { - return Err(TransactionValidationError::BalanceTooLow.into()); + return Err(TransactionValidationError::BalanceTooLow); } } From 716fee23155e8ba950d36860602e18c9ee1f226b Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Wed, 4 Oct 2023 15:41:39 +0200 Subject: [PATCH 106/143] chore: remove unique-scheduler leftovers --- Cargo.toml | 2 - Makefile | 5 -- runtime/common/config/pallets/mod.rs | 3 - runtime/common/config/pallets/scheduler.rs | 88 ---------------------- runtime/common/maintenance.rs | 3 - runtime/common/mod.rs | 3 - runtime/common/runtime_apis.rs | 6 -- runtime/opal/Cargo.toml | 1 - runtime/quartz/Cargo.toml | 1 - runtime/unique/Cargo.toml | 1 - 10 files changed, 113 deletions(-) delete mode 100644 runtime/common/config/pallets/scheduler.rs diff --git a/Cargo.toml b/Cargo.toml index 3bc5bdd180..0affad4865 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,5 @@ [workspace] default-members = ['client/*', 'node/*', 'runtime/opal'] -exclude = ['pallets/scheduler-v2'] members = [ 'client/*', 'crates/*', @@ -54,7 +53,6 @@ pallet-refungible = { default-features = false, path = "pallets/refungible" } pallet-structure = { default-features = false, path = "pallets/structure" } pallet-test-utils = { default-features = false, path = "test-pallets/utils" } pallet-unique = { path = "pallets/unique", default-features = false } -# pallet-unique-scheduler-v2 = { path = "pallets/scheduler-v2", default-features = false } precompile-utils-macro = { path = "runtime/common/ethereum/precompiles/utils/macro" } struct-versioning = { path = "crates/struct-versioning" } uc-rpc = { path = "client/rpc" } diff --git a/Makefile b/Makefile index e920b2b077..98664e2762 100644 --- a/Makefile +++ b/Makefile @@ -128,10 +128,6 @@ bench-nonfungible: bench-structure: make _bench PALLET=structure -.PHONY: bench-scheduler -bench-scheduler: - make _bench PALLET=unique-scheduler-v2 PALLET_DIR=scheduler-v2 - .PHONY: bench-foreign-assets bench-foreign-assets: make _bench PALLET=foreign-assets @@ -157,7 +153,6 @@ bench-xcm: make _bench PALLET=xcm OUTPUT=./runtime/common/weights/xcm.rs TEMPLATE="--template=.maintain/external-weight-template.hbs" .PHONY: bench -# Disabled: bench-scheduler bench: bench-app-promotion bench-common bench-evm-migration bench-unique bench-structure bench-fungible bench-refungible bench-nonfungible bench-configuration bench-foreign-assets bench-maintenance bench-xcm 
bench-collator-selection bench-identity .PHONY: check diff --git a/runtime/common/config/pallets/mod.rs b/runtime/common/config/pallets/mod.rs index caba1b889f..1d7abdc768 100644 --- a/runtime/common/config/pallets/mod.rs +++ b/runtime/common/config/pallets/mod.rs @@ -40,9 +40,6 @@ use crate::{ Balances, Runtime, RuntimeCall, RuntimeEvent, DECIMALS, TOKEN_SYMBOL, VERSION, }; -#[cfg(feature = "unique-scheduler")] -pub mod scheduler; - #[cfg(feature = "foreign-assets")] pub mod foreign_asset; diff --git a/runtime/common/config/pallets/scheduler.rs b/runtime/common/config/pallets/scheduler.rs deleted file mode 100644 index 7035cfa10b..0000000000 --- a/runtime/common/config/pallets/scheduler.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2022 Unique Network (Gibraltar) Ltd. -// This file is part of Unique Network. - -// Unique Network is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Unique Network is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Unique Network. If not, see . - -use core::cmp::Ordering; - -use frame_support::{ - parameter_types, - traits::{EnsureOrigin, PrivilegeCmp}, - weights::Weight, -}; -use frame_system::{EnsureRoot, RawOrigin}; -use pallet_unique_scheduler_v2::ScheduledEnsureOriginSuccess; -use parity_scale_codec::Decode; -use sp_runtime::Perbill; -use up_common::types::AccountId; - -use crate::{ - runtime_common::{config::substrate::RuntimeBlockWeights, scheduler::SchedulerPaymentExecutor}, - OriginCaller, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, -}; - -parameter_types! 
{ - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(50) * - RuntimeBlockWeights::get().max_block; - pub const MaxScheduledPerBlock: u32 = 50; - - pub const NoPreimagePostponement: Option = Some(10); - pub const Preimage: Option = Some(10); -} - -pub struct EnsureSignedOrRoot(sp_std::marker::PhantomData); -impl, O>> + From>, AccountId: Decode> - EnsureOrigin for EnsureSignedOrRoot -{ - type Success = ScheduledEnsureOriginSuccess; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Root => Ok(ScheduledEnsureOriginSuccess::Root), - RawOrigin::Signed(who) => Ok(ScheduledEnsureOriginSuccess::Signed(who)), - r => Err(O::from(r)), - }) - } -} - -pub struct EqualOrRootOnly; -impl PrivilegeCmp for EqualOrRootOnly { - fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { - use RawOrigin::*; - - let left = left.clone().try_into().ok()?; - let right = right.clone().try_into().ok()?; - - match (left, right) { - (Root, Root) => Some(Ordering::Equal), - (Root, _) => Some(Ordering::Greater), - (_, Root) => Some(Ordering::Less), - lr @ _ => (lr.0 == lr.1).then(|| Ordering::Equal), - } - } -} - -impl pallet_unique_scheduler_v2::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; - type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = EnsureSignedOrRoot; - type OriginPrivilegeCmp = EqualOrRootOnly; - type MaxScheduledPerBlock = MaxScheduledPerBlock; - type WeightInfo = (); - type Preimages = (); - type CallExecutor = SchedulerPaymentExecutor; - type PrioritySetOrigin = EnsureRoot; -} diff --git a/runtime/common/maintenance.rs b/runtime/common/maintenance.rs index f30a143725..a9982e008e 100644 --- a/runtime/common/maintenance.rs +++ b/runtime/common/maintenance.rs @@ -67,9 +67,6 @@ impl SignedExtension for CheckMaintenance { | RuntimeCall::Structure(_) | RuntimeCall::Unique(_) => Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - #[cfg(feature = "unique-scheduler")] - RuntimeCall::Scheduler(_) => Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - #[cfg(feature = "app-promotion")] RuntimeCall::AppPromotion(_) => { Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) diff --git a/runtime/common/mod.rs b/runtime/common/mod.rs index 932f2a031b..cd5be709ce 100644 --- a/runtime/common/mod.rs +++ b/runtime/common/mod.rs @@ -23,9 +23,6 @@ pub mod instance; pub mod maintenance; pub mod runtime_apis; -#[cfg(feature = "unique-scheduler")] -pub mod scheduler; - pub mod sponsoring; #[allow(missing_docs)] pub mod weights; diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 112f680216..14d8c0b50d 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -548,9 +548,6 @@ macro_rules! impl_common_runtime_apis { #[cfg(feature = "refungible")] list_benchmark!(list, extra, pallet_refungible, Refungible); - #[cfg(feature = "unique-scheduler")] - list_benchmark!(list, extra, pallet_unique_scheduler_v2, Scheduler); - #[cfg(feature = "collator-selection")] list_benchmark!(list, extra, pallet_collator_selection, CollatorSelection); @@ -614,9 +611,6 @@ macro_rules! 
impl_common_runtime_apis { #[cfg(feature = "refungible")] add_benchmark!(params, batches, pallet_refungible, Refungible); - #[cfg(feature = "unique-scheduler")] - add_benchmark!(params, batches, pallet_unique_scheduler_v2, Scheduler); - #[cfg(feature = "collator-selection")] add_benchmark!(params, batches, pallet_collator_selection, CollatorSelection); diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index b03309be17..028d89a76d 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -229,7 +229,6 @@ governance = [] preimage = [] refungible = [] session-test-timings = [] -unique-scheduler = [] ################################################################################ # local dependencies diff --git a/runtime/quartz/Cargo.toml b/runtime/quartz/Cargo.toml index c6733a9688..34e342ec8a 100644 --- a/runtime/quartz/Cargo.toml +++ b/runtime/quartz/Cargo.toml @@ -221,7 +221,6 @@ governance = [] preimage = [] refungible = [] session-test-timings = [] -unique-scheduler = [] ################################################################################ # local dependencies diff --git a/runtime/unique/Cargo.toml b/runtime/unique/Cargo.toml index 6b03dfeeb7..7314c8b6ae 100644 --- a/runtime/unique/Cargo.toml +++ b/runtime/unique/Cargo.toml @@ -224,7 +224,6 @@ governance = [] preimage = [] refungible = [] session-test-timings = [] -unique-scheduler = [] ################################################################################ # local dependencies From 22a5178edb6c8c0f553ea40013a2338b58f73302 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Mon, 9 Oct 2023 15:55:53 +0200 Subject: [PATCH 107/143] build: bump spec_version --- runtime/opal/src/lib.rs | 2 +- runtime/quartz/src/lib.rs | 4 ++-- runtime/unique/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/opal/src/lib.rs b/runtime/opal/src/lib.rs index 25a8068705..08de6516f7 100644 --- a/runtime/opal/src/lib.rs +++ b/runtime/opal/src/lib.rs @@ -49,7 +49,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("opal"), impl_name: create_runtime_str!("opal"), authoring_version: 1, - spec_version: 943061, + spec_version: 10010062, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/runtime/quartz/src/lib.rs b/runtime/quartz/src/lib.rs index 2943553aae..a27cecfaec 100644 --- a/runtime/quartz/src/lib.rs +++ b/runtime/quartz/src/lib.rs @@ -51,7 +51,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_name: create_runtime_str!("quartz"), authoring_version: 1, - spec_version: 943061, + spec_version: 10010062, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, @@ -64,7 +64,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_name: create_runtime_str!("sapphire"), authoring_version: 1, - spec_version: 943061, + spec_version: 10010062, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/runtime/unique/src/lib.rs b/runtime/unique/src/lib.rs index 462f3a155a..e34bbda0f4 100644 --- a/runtime/unique/src/lib.rs +++ b/runtime/unique/src/lib.rs @@ -49,7 +49,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("unique"), impl_name: create_runtime_str!("unique"), authoring_version: 1, - spec_version: 943061, + spec_version: 10010062, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, From c3c95caf228f2ce21444353f1c934c134d0ead83 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Mon, 9 Oct 2023 15:58:42 
+0200 Subject: [PATCH 108/143] test: fix build --- pallets/collator-selection/src/mock.rs | 40 +++++++++++---------- pallets/collator-selection/src/tests.rs | 8 ++--- pallets/identity/src/tests.rs | 6 ++-- pallets/inflation/src/tests.rs | 48 +++++++++++++------------ runtime/common/tests/mod.rs | 10 +++--- runtime/common/tests/xcm.rs | 6 ++-- runtime/tests/src/dispatch.rs | 1 + runtime/tests/src/lib.rs | 16 ++++----- runtime/tests/src/weights | 1 + 9 files changed, 70 insertions(+), 66 deletions(-) create mode 120000 runtime/tests/src/dispatch.rs create mode 120000 runtime/tests/src/weights diff --git a/pallets/collator-selection/src/mock.rs b/pallets/collator-selection/src/mock.rs index 5355bf2f0b..2d9e42c1ce 100644 --- a/pallets/collator-selection/src/mock.rs +++ b/pallets/collator-selection/src/mock.rs @@ -32,23 +32,22 @@ use frame_support::{ ord_parameter_types, parameter_types, - traits::{FindAuthor, GenesisBuild, ValidatorRegistration}, + traits::{ConstU32, FindAuthor, ValidatorRegistration}, PalletId, }; use frame_system as system; use frame_system::EnsureSignedBy; -use sp_core::H256; +use sp_core::{ConstBool, H256}; use sp_runtime::{ - testing::{Header, UintAuthorityId}, + testing::UintAuthorityId, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, - Perbill, RuntimeAppPublic, + BuildStorage, Perbill, RuntimeAppPublic, }; use super::*; use crate as collator_selection; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlockU32; // Configure a mock runtime to test the pallet. frame_support::construct_runtime!( @@ -64,12 +63,13 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; + pub const BlockHashCount: u32 = 250; pub const SS58Prefix: u8 = 42; } impl system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; type BlockWeights = (); type BlockLength = (); type DbWeight = (); @@ -90,7 +90,7 @@ impl system::Config for Test { type SystemWeightInfo = (); type SS58Prefix = SS58Prefix; type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; } parameter_types! { @@ -113,6 +113,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = [u8; 16]; type MaxHolds = MaxHolds; type MaxFreezes = MaxFreezes; + type RuntimeHoldReason = RuntimeHoldReason; } pub struct Author4; @@ -145,6 +146,7 @@ impl pallet_aura::Config for Test { type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; type MaxAuthorities = MaxAuthorities; type DisabledValidators = (); + type AllowMultipleBlocksPerSlot = ConstBool; } sp_runtime::impl_opaque_keys! { @@ -162,27 +164,27 @@ impl From for MockSessionKeys { parameter_types! 
{ pub static SessionHandlerCollators: Vec = Vec::new(); - pub static SessionChangeBlock: u64 = 0; + pub static SessionChangeBlock: u32 = 0; } pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; fn on_genesis_session(keys: &[(u64, Ks)]) { - SessionHandlerCollators::set(keys.into_iter().map(|(a, _)| *a).collect::>()) + SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::>()) } fn on_new_session(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); dbg!(keys.len()); - SessionHandlerCollators::set(keys.into_iter().map(|(a, _)| *a).collect::>()) + SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::>()) } fn on_before_session_ending() {} fn on_disabled(_: u32) {} } parameter_types! { - pub const Offset: u64 = 0; - pub const Period: u64 = 10; + pub const Offset: u32 = 0; + pub const Period: u32 = 10; } impl pallet_session::Config for Test { @@ -201,7 +203,7 @@ impl pallet_session::Config for Test { parameter_types! { pub const MaxCollators: u32 = 5; pub const LicenseBond: u64 = 10; - pub const KickThreshold: u64 = 10; + pub const KickThreshold: u32 = 10; // the following values do not matter and are meaningless, etc. pub const DefaultWeightToFeeCoefficient: u64 = 100_000; pub const DefaultMinGasPrice: u64 = 100_000; @@ -230,6 +232,7 @@ impl ValidatorRegistration for IsRegistered { impl Config for Test { type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; type UpdateOrigin = EnsureSignedBy; type PotId = PotId; type MaxCollators = MaxCollators; @@ -238,7 +241,6 @@ impl Config for Test { type ValidatorId = ::AccountId; type ValidatorIdOf = IdentityCollator; type ValidatorRegistration = IsRegistered; - type LicenceBondIdentifier = LicenceBondIdentifier; type Currency = Balances; type DesiredCollators = MaxCollators; type LicenseBond = LicenseBond; @@ -248,8 +250,8 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = >::default() + .build_storage() .unwrap(); let invulnerables = vec![1, 2]; @@ -284,9 +286,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } -pub fn initialize_to_block(n: u64) { +pub fn initialize_to_block(n: u32) { for i in System::block_number() + 1..=n { System::set_block_number(i); - >::on_initialize(i); + >::on_initialize(i); } } diff --git a/pallets/collator-selection/src/tests.rs b/pallets/collator-selection/src/tests.rs index b13a44e0f1..096d50b593 100644 --- a/pallets/collator-selection/src/tests.rs +++ b/pallets/collator-selection/src/tests.rs @@ -32,10 +32,10 @@ use frame_support::{ assert_noop, assert_ok, - traits::{fungible, GenesisBuild, OnInitialize}, + traits::{fungible, OnInitialize}, }; use scale_info::prelude::*; -use sp_runtime::{traits::BadOrigin, TokenError}; +use sp_runtime::{traits::BadOrigin, BuildStorage, TokenError}; use crate::{self as collator_selection, mock::*, Config, Error}; @@ -464,8 +464,8 @@ fn kick_mechanism() { #[should_panic = "duplicate invulnerables in genesis."] fn cannot_set_genesis_value_twice() { sp_tracing::try_init_simple(); - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = >::default() + .build_storage() .unwrap(); let invulnerables = vec![1, 1]; diff --git a/pallets/identity/src/tests.rs b/pallets/identity/src/tests.rs index 
859a1f3095..9592b10b12 100644 --- a/pallets/identity/src/tests.rs +++ b/pallets/identity/src/tests.rs @@ -43,7 +43,6 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use parity_scale_codec::{Decode, Encode}; use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, BuildStorage, }; @@ -51,8 +50,7 @@ use sp_runtime::{ use super::*; use crate as pallet_identity; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlockU32; frame_support::construct_runtime!( pub enum Test { @@ -79,7 +77,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; + type BlockHashCount = ConstU32<250>; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/pallets/inflation/src/tests.rs b/pallets/inflation/src/tests.rs index 59821b9a32..221c964496 100644 --- a/pallets/inflation/src/tests.rs +++ b/pallets/inflation/src/tests.rs @@ -34,12 +34,12 @@ use sp_runtime::{ use crate as pallet_inflation; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlockU32; -const YEAR: u64 = 5_259_600; // 6-second blocks - // const YEAR: u64 = 2_629_800; // 12-second blocks - // Expected 100-block inflation for year 1 is 100 * 100_000_000 / YEAR = FIRST_YEAR_BLOCK_INFLATION +// 6-second blocks +// const YEAR: u32 = 2_629_800; // 12-second blocks +// Expected 100-block inflation for year 1 is 100 * 100_000_000 / YEAR = FIRST_YEAR_BLOCK_INFLATION +const YEAR: u32 = 5_259_600; const FIRST_YEAR_BLOCK_INFLATION: u64 = 1901; parameter_types! { @@ -60,6 +60,7 @@ impl pallet_balances::Config for Test { type FreezeIdentifier = (); type MaxHolds = (); type MaxFreezes = (); + type RuntimeHoldReason = RuntimeHoldReason; } frame_support::construct_runtime!( @@ -71,7 +72,7 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; + pub const BlockHashCount: u32 = 250; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, 0)); pub const SS58Prefix: u8 = 42; @@ -79,6 +80,7 @@ parameter_types! 
{ impl frame_system::Config for Test { type BaseCallFilter = Everything; + type Block = Block; type BlockWeights = (); type BlockLength = (); type DbWeight = (); @@ -187,7 +189,7 @@ fn inflation_works() { fn inflation_second_deposit() { new_test_ext().execute_with(|| { // Total issuance = 1_000_000_000 - let initial_issuance: u64 = 1_000_000_000; + let initial_issuance = 1_000_000_000; let _ = >::deposit(&1234, initial_issuance, Precision::Exact); assert_eq!(Balances::free_balance(1234), initial_issuance); MockBlockNumberProvider::set(1); @@ -196,20 +198,20 @@ fn inflation_second_deposit() { assert_ok!(Inflation::start_inflation(RawOrigin::Root.into(), 1)); // Next inflation deposit happens when block is greater then or equal to NextInflationBlock - let mut block: u64 = 2; - let balance_before: u64 = Balances::free_balance(1234); + let mut block = 2; + let balance_before = Balances::free_balance(1234); while block < >::get() { - MockBlockNumberProvider::set(block as u64); + MockBlockNumberProvider::set(block); Inflation::on_initialize(0); block += 1; } - let balance_just_before: u64 = Balances::free_balance(1234); + let balance_just_before = Balances::free_balance(1234); assert_eq!(balance_before, balance_just_before); // The block with inflation - MockBlockNumberProvider::set(block as u64); + MockBlockNumberProvider::set(block); Inflation::on_initialize(0); - let balance_after: u64 = Balances::free_balance(1234); + let balance_after = Balances::free_balance(1234); assert_eq!(balance_after - balance_just_before, block_inflation!()); }); } @@ -234,7 +236,7 @@ fn inflation_in_1_year() { Inflation::on_initialize(0); } assert_eq!( - initial_issuance + (FIRST_YEAR_BLOCK_INFLATION * (YEAR / 100)), + initial_issuance + (FIRST_YEAR_BLOCK_INFLATION * ((YEAR as u64) / 100)), >::total_issuance() ); @@ -243,8 +245,8 @@ fn inflation_in_1_year() { let block_inflation_year_2 = block_inflation!(); // Expected 100-block inflation for year 2: 100 * 9.33% * initial issuance * 110% / YEAR == 1951 let expecter_year_2_inflation: u64 = (initial_issuance - + FIRST_YEAR_BLOCK_INFLATION * YEAR / 100) - * 933 * 100 / (10000 * YEAR); + + FIRST_YEAR_BLOCK_INFLATION * (YEAR as u64) / 100) + * 933 * 100 / (10000 * (YEAR as u64)); assert_eq!(block_inflation_year_2 / 10, expecter_year_2_inflation / 10); // divide by 10 for approx. 
equality }); } @@ -253,8 +255,8 @@ fn inflation_in_1_year() { fn inflation_start_large_kusama_block() { new_test_ext().execute_with(|| { // Total issuance = 1_000_000_000 - let initial_issuance: u64 = 1_000_000_000; - let start_block: u64 = 10457457; + let initial_issuance = 1_000_000_000; + let start_block = 10457457; let _ = >::deposit(&1234, initial_issuance, Precision::Exact); assert_eq!(Balances::free_balance(1234), initial_issuance); MockBlockNumberProvider::set(start_block); @@ -273,7 +275,7 @@ fn inflation_start_large_kusama_block() { Inflation::on_initialize(0); } assert_eq!( - initial_issuance + (FIRST_YEAR_BLOCK_INFLATION * (YEAR / 100)), + initial_issuance + (FIRST_YEAR_BLOCK_INFLATION * ((YEAR as u64) / 100)), >::total_issuance() ); @@ -282,8 +284,8 @@ fn inflation_start_large_kusama_block() { let block_inflation_year_2 = block_inflation!(); // Expected 100-block inflation for year 2: 100 * 9.33% * initial issuance * 110% / YEAR == 1951 let expecter_year_2_inflation: u64 = (initial_issuance - + FIRST_YEAR_BLOCK_INFLATION * YEAR / 100) - * 933 * 100 / (10000 * YEAR); + + FIRST_YEAR_BLOCK_INFLATION * (YEAR as u64) / 100) + * 933 * 100 / (10000 * (YEAR as u64)); assert_eq!(block_inflation_year_2 / 10, expecter_year_2_inflation / 10); // divide by 10 for approx. equality }); } @@ -320,14 +322,14 @@ fn inflation_after_year_10_is_flat() { #[test] fn inflation_rate_by_year() { new_test_ext().execute_with(|| { - let payouts: u64 = YEAR / InflationBlockInterval::get() as u64; + let payouts = (YEAR / InflationBlockInterval::get()) as u64; // Inflation starts at 10% and does down by 2/3% every year until year 9 (included), // then it is flat. let payout_by_year: [u64; 11] = [1000, 933, 867, 800, 733, 667, 600, 533, 467, 400, 400]; // For accuracy total issuance = payout0 * payouts * 10; - let initial_issuance: u64 = payout_by_year[0] * payouts * 10; + let initial_issuance = payout_by_year[0] * payouts * 10; let _ = >::deposit(&1234, initial_issuance, Precision::Exact); assert_eq!(Balances::free_balance(1234), initial_issuance); diff --git a/runtime/common/tests/mod.rs b/runtime/common/tests/mod.rs index fd1a4c3af9..ae8f4ec732 100644 --- a/runtime/common/tests/mod.rs +++ b/runtime/common/tests/mod.rs @@ -19,7 +19,7 @@ pub use sp_runtime::AccountId32 as AccountId; use sp_runtime::{BuildStorage, Storage}; use up_common::types::AuraId; -use crate::{BuildGenesisConfig, ParachainInfoConfig, Runtime, RuntimeEvent, System}; +use crate::{ParachainInfoConfig, Runtime, RuntimeEvent, RuntimeGenesisConfig, System}; pub type Balance = u128; pub mod xcm; @@ -51,8 +51,8 @@ fn last_events(n: usize) -> Vec { fn new_test_ext(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities { let mut storage = make_basic_storage(); - pallet_balances::BuildGenesisConfig:: { balances } - .build_storage(&mut storage) + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut storage) .unwrap(); let mut ext = sp_io::TestExternalities::new(storage); @@ -95,7 +95,7 @@ fn make_basic_storage() -> Storage { .map(|acc| get_account_id_from_seed::(acc)) .collect::>(); - let cfg = BuildGenesisConfig { + let cfg = RuntimeGenesisConfig { collator_selection: CollatorSelectionConfig { invulnerables }, session: SessionConfig { keys }, parachain_info: ParachainInfoConfig { @@ -112,7 +112,7 @@ fn make_basic_storage() -> Storage { fn make_basic_storage() -> Storage { use crate::AuraConfig; - let cfg = BuildGenesisConfig { + let cfg = RuntimeGenesisConfig { aura: AuraConfig { authorities: vec![ 
get_from_seed::("Alice"), diff --git a/runtime/common/tests/xcm.rs b/runtime/common/tests/xcm.rs index ed8bbe5532..1175cca501 100644 --- a/runtime/common/tests/xcm.rs +++ b/runtime/common/tests/xcm.rs @@ -52,9 +52,9 @@ pub fn xcm_transact_is_forbidden() { let xcm_event = &last_events(1)[0]; match xcm_event { - RuntimeEvent::PolkadotXcm(pallet_xcm::Event::::Attempted( - Outcome::Incomplete(_weight, Error::NoPermission), - )) => { /* Pass */ } + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::::Attempted { + outcome: Outcome::Incomplete(_weight, Error::NoPermission), + }) => { /* Pass */ } _ => panic!( "Expected PolkadotXcm.Attempted(Incomplete(_weight, NoPermission)),\ found: {xcm_event:#?}" diff --git a/runtime/tests/src/dispatch.rs b/runtime/tests/src/dispatch.rs new file mode 120000 index 0000000000..a37422cf94 --- /dev/null +++ b/runtime/tests/src/dispatch.rs @@ -0,0 +1 @@ +../../common/dispatch.rs \ No newline at end of file diff --git a/runtime/tests/src/lib.rs b/runtime/tests/src/lib.rs index 0dea323b78..6a5b461d75 100644 --- a/runtime/tests/src/lib.rs +++ b/runtime/tests/src/lib.rs @@ -33,23 +33,20 @@ use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::{H160, H256, U256}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; use up_data_structs::mapping::{CrossTokenAddressMapping, EvmTokenAddressMapping}; -#[path = "../../common/dispatch.rs"] mod dispatch; use dispatch::CollectionDispatchT; -#[path = "../../common/weights/mod.rs"] mod weights; use weights::CommonWeights; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; +type Block = frame_system::mocking::MockBlockU32; #[cfg(test)] mod tests; @@ -73,13 +70,14 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; + pub const BlockHashCount: u32 = 250; pub const SS58Prefix: u8 = 42; } impl system::Config for Test { type RuntimeEvent = RuntimeEvent; type BaseCallFilter = Everything; + type Block = Block; type BlockWeights = (); type BlockLength = (); type DbWeight = (); @@ -120,6 +118,7 @@ impl pallet_balances::Config for Test { type MaxFreezes = MaxLocks; type FreezeIdentifier = [u8; 8]; type MaxHolds = MaxLocks; + type RuntimeHoldReason = RuntimeHoldReason; } parameter_types! { @@ -232,6 +231,7 @@ impl pallet_evm::Config for Test { type OnMethodCall = (); type OnCreate = (); type OnChargeTransaction = (); + type OnCheckEvmTransaction = (); type FindAuthor = (); type BlockHashMapping = SubstrateBlockHashMapping; type Timestamp = Timestamp; @@ -296,8 +296,8 @@ impl pallet_unique::Config for Test { // Build genesis storage according to the mock runtime. 
 pub fn new_test_ext() -> sp_io::TestExternalities {
-	system::GenesisConfig::default()
-		.build_storage::()
+	>::default()
+		.build_storage()
 		.unwrap()
 		.into()
 }
diff --git a/runtime/tests/src/weights b/runtime/tests/src/weights
new file mode 120000
index 0000000000..2436989693
--- /dev/null
+++ b/runtime/tests/src/weights
@@ -0,0 +1 @@
+../../common/weights
\ No newline at end of file

From ff74635c138f9636d4ddcd8faa661a8ca56a191c Mon Sep 17 00:00:00 2001
From: Yaroslav Bolyukin
Date: Mon, 9 Oct 2023 15:59:04 +0200
Subject: [PATCH 109/143] ci: fix eslint precommit regex

---
 .githooks/pre-commit | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index 1ffff6c3ef..1189a26b48 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -20,7 +20,7 @@ else
   exit 1
 fi
 
-STAGED_TEST_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep ".ts$\|.js$")
+STAGED_TEST_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep "\.ts$\|\.js$")
 
 if [[ "$STAGED_TEST_FILES" = "" ]]; then
   echo -e "${GREEN}eslint succeded${NC}"
@@ -39,4 +39,4 @@ else
   exit 1
 fi
 
-exit $?
\ No newline at end of file
+exit $?

From 7489c8a676280d99a99fc27a0d4a4e1883ea13be Mon Sep 17 00:00:00 2001
From: Yaroslav Bolyukin
Date: Mon, 9 Oct 2023 16:06:18 +0200
Subject: [PATCH 110/143] doc: dev mode startup

Co-authored-by: Daniel Shiposha
---
 README.md | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index ef52b5fe75..11ad4b15e7 100644
--- a/README.md
+++ b/README.md
@@ -111,7 +111,22 @@ make build-release
 
 ## Running as Parachain locally
 
-TODO
+### Dev mode
+
+You can launch the node in the dev mode, where blocks are sealed automatically every 500 ms or on each new transaction.
+
+* Opal Runtime: `cargo run --release -- --dev`
+* Quartz Runtime: `cargo run --release --features quartz-runtime -- --dev`
+* Unique Runtime: `cargo run --release --features unique-runtime -- --dev`
+
+  You can tweak the dev mode with the following CLI options:
+  * --idle-autoseal-interval
+    When running the node in the `--dev` mode, an empty block will be sealed automatically after the `` milliseconds.
+  * --disable-autoseal-on-tx
+    Disable auto-sealing blocks on new transactions in the `--dev` mode
+  * --autoseal-finalization-delay
+    Finalization delay (in seconds) of auto-sealed blocks in the `--dev` mode.
+    Disabled by default.
 
 ## Run Integration Tests
 

From c14994fb09011197559484f81b52ca626bb93fab Mon Sep 17 00:00:00 2001
From: Yaroslav Bolyukin
Date: Mon, 9 Oct 2023 16:04:55 +0200
Subject: [PATCH 111/143] doc: ethereum sponsoring clarification

---
 pallets/evm-transaction-payment/src/lib.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pallets/evm-transaction-payment/src/lib.rs b/pallets/evm-transaction-payment/src/lib.rs
index 62986c6890..6cea9bb625 100644
--- a/pallets/evm-transaction-payment/src/lib.rs
+++ b/pallets/evm-transaction-payment/src/lib.rs
@@ -89,6 +89,8 @@ fn get_sponsor(
 ) -> Option {
 	let accept_gas_fee = |gas_fee| {
 		let (base_fee, _) = T::FeeCalculator::min_gas_price();
+		// Metamask specifies a base fee twice the chain-reported minGasPrice,
+		// but we allow further leeway (why?): the sponsored gas fee may be up to 2.1 * minGasPrice, hence the 21/10.
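+		// For example (illustrative numbers only): with minGasPrice = 1_000, sponsorship accepts
+		// a gas_fee in the range 1_000..=2_100, so Metamask's 2 * 1_000 = 2_000 still fits.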
 		base_fee <= gas_fee && gas_fee <= base_fee * 21 / 10
 	};
 	let (max_fee_per_gas, may_sponsor) = match (max_fee_per_gas, is_transactional) {

From 4950cbc3976b92be45b582f1145a12f7e0830651 Mon Sep 17 00:00:00 2001
From: Grigoriy Simonov
Date: Mon, 9 Oct 2023 14:13:33 +0000
Subject: [PATCH 112/143] fix: update benchmarks to v2

---
 pallets/app-promotion/src/benchmarking.rs | 332 ++++++---
 .../collator-selection/src/benchmarking.rs | 241 ++++---
 pallets/common/src/benchmarking.rs | 117 ++--
 pallets/configuration/src/benchmarking.rs | 134 ++--
 pallets/evm-migration/src/benchmarking.rs | 82 ++-
 pallets/foreign-assets/src/benchmarking.rs | 72 +-
 pallets/fungible/src/benchmarking.rs | 146 +++-
 pallets/identity/src/benchmarking.rs | 556 ++++++++++-----
 pallets/inflation/src/benchmarking.rs | 21 +-
 pallets/maintenance/src/benchmarking.rs | 56 +-
 pallets/nonfungible/src/benchmarking.rs | 473 +++++++++----
 pallets/refungible/src/benchmarking.rs | 634 +++++++++++++-----
 pallets/structure/src/benchmarking.rs | 36 +-
 pallets/unique/src/benchmarking.rs | 207 ++++--
 14 files changed, 2255 insertions(+), 852 deletions(-)

diff --git a/pallets/app-promotion/src/benchmarking.rs b/pallets/app-promotion/src/benchmarking.rs
index 60e54b4b00..43425a2ae3 100644
--- a/pallets/app-promotion/src/benchmarking.rs
+++ b/pallets/app-promotion/src/benchmarking.rs
@@ -16,15 +16,24 @@
 #![cfg(feature = "runtime-benchmarks")]
 
-use frame_benchmarking::{account, benchmarks};
-use frame_support::traits::{fungible::Unbalanced, OnInitialize};
-use frame_system::RawOrigin;
+use frame_benchmarking::v2::*;
+use frame_support::traits::{
+	fungible::{Inspect, Mutate, Unbalanced},
+	OnInitialize,
+};
+use frame_system::{pallet_prelude::*, RawOrigin};
+use pallet_evm::account::CrossAccountId;
 use pallet_evm_migration::Pallet as EvmMigrationPallet;
 use pallet_unique::benchmarking::create_nft_collection;
-use sp_runtime::traits::Bounded;
+use sp_core::{Get, H160};
+use sp_runtime::{
+	traits::{BlockNumberProvider, Bounded},
+	Perbill,
+};
+use sp_std::{iter::Sum, vec, vec::Vec};
 
-use super::*;
-use crate::Pallet as PromototionPallet;
+use super::{BalanceOf, Call, Config, Pallet, Staked, PENDING_LIMIT_PER_BLOCK};
+use crate::{pallet, Pallet as PromototionPallet};
 
 const SEED: u32 = 0;
 
@@ -49,55 +58,98 @@ where
 	Ok(pallet_admin)
 }
 
-benchmarks!
{ - where_clause{ - where T: Config + pallet_unique::Config + pallet_evm_migration::Config , +#[benchmarks( + where T: Config + pallet_unique::Config + pallet_evm_migration::Config , BlockNumberFor: From + Into, BalanceOf: Sum + From - } +)] +mod benchmarks { + use super::*; - on_initialize { - let b in 0..PENDING_LIMIT_PER_BLOCK; + #[benchmark] + fn on_initialize(b: Linear<0, PENDING_LIMIT_PER_BLOCK>) -> Result<(), BenchmarkError> { set_admin::()?; (0..b).try_for_each(|index| { let staker = account::("staker", index, SEED); - ::Currency::write_balance(&staker, Into::>::into(10_000u128) * T::Nominal::get())?; - PromototionPallet::::stake(RawOrigin::Signed(staker.clone()).into(), Into::>::into(100u128) * T::Nominal::get())?; + ::Currency::write_balance( + &staker, + Into::>::into(10_000u128) * T::Nominal::get(), + )?; + PromototionPallet::::stake( + RawOrigin::Signed(staker.clone()).into(), + Into::>::into(100u128) * T::Nominal::get(), + )?; PromototionPallet::::unstake_all(RawOrigin::Signed(staker).into())?; Result::<(), sp_runtime::DispatchError>::Ok(()) })?; - let block_number = >::current_block_number() + T::PendingInterval::get(); - }: {PromototionPallet::::on_initialize(block_number)} + let block_number = + >::current_block_number() + T::PendingInterval::get(); + + #[block] + { + PromototionPallet::::on_initialize(block_number); + } - set_admin_address { + Ok(()) + } + + #[benchmark] + fn set_admin_address() -> Result<(), BenchmarkError> { let pallet_admin = account::("admin", 0, SEED); - let _ = ::Currency::set_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); - } : _(RawOrigin::Root, T::CrossAccountId::from_sub(pallet_admin)) + let _ = ::Currency::set_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); - payout_stakers{ - let b in 1..100; + #[extrinsic_call] + _(RawOrigin::Root, T::CrossAccountId::from_sub(pallet_admin)); - let pallet_admin = account::("admin", 1, SEED); - let share = Perbill::from_rational(1u32, 20); - PromototionPallet::::set_admin_address(RawOrigin::Root.into(), T::CrossAccountId::from_sub(pallet_admin.clone()))?; - ::Currency::write_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value())?; - ::Currency::write_balance(&::TreasuryAccountId::get(), Perbill::from_rational(1u32, 2) * BalanceOf::::max_value())?; + Ok(()) + } - let stakers: Vec = (0..b).map(|index| account("staker", index, SEED)).collect(); + #[benchmark] + fn payout_stakers(b: Linear<0, 100>) -> Result<(), BenchmarkError> { + let pallet_admin = account::("admin", 1, SEED); + PromototionPallet::::set_admin_address( + RawOrigin::Root.into(), + T::CrossAccountId::from_sub(pallet_admin.clone()), + )?; + ::Currency::write_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + )?; + ::Currency::write_balance( + &::TreasuryAccountId::get(), + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + )?; + + let stakers: Vec = + (0..b).map(|index| account("staker", index, SEED)).collect(); stakers.iter().try_for_each(|staker| { - ::Currency::write_balance(staker, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value())?; + ::Currency::write_balance( + staker, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + )?; Result::<(), sp_runtime::DispatchError>::Ok(()) })?; (1..11).try_for_each(|i| { >::set_block_number(i.into()); - T::RelayBlockNumberProvider::set_block_number((2*i).into()); + T::RelayBlockNumberProvider::set_block_number((2 * i).into()); 
assert_eq!(>::block_number(), i.into()); - assert_eq!(T::RelayBlockNumberProvider::current_block_number(), (2*i).into()); - stakers.iter() - .map(|staker| { - PromototionPallet::::stake(RawOrigin::Signed(staker.clone()).into(), Into::>::into(100u128) * T::Nominal::get()) - }).collect::, _>>()?; + assert_eq!( + T::RelayBlockNumberProvider::current_block_number(), + (2 * i).into() + ); + stakers + .iter() + .map(|staker| { + PromototionPallet::::stake( + RawOrigin::Signed(staker.clone()).into(), + Into::>::into(100u128) * T::Nominal::get(), + ) + }) + .collect::, _>>()?; Result::<(), sp_runtime::DispatchError>::Ok(()) })?; @@ -107,83 +159,195 @@ benchmarks! { >::set_block_number(15_000.into()); T::RelayBlockNumberProvider::set_block_number(30_000.into()); - } : _(RawOrigin::Signed(pallet_admin.clone()), Some(b as u8)) - stake { + #[extrinsic_call] + _(RawOrigin::Signed(pallet_admin.clone()), Some(b as u8)); + + Ok(()) + } + + #[benchmark] + fn stake() -> Result<(), BenchmarkError> { let caller = account::("caller", 0, SEED); let share = Perbill::from_rational(1u32, 10); - let _ = ::Currency::write_balance(&caller, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); - } : _(RawOrigin::Signed(caller.clone()), share * ::Currency::total_balance(&caller)) - unstake_all { - let caller = account::("caller", 0, SEED); - let share = Perbill::from_rational(1u32, 20); - let _ = ::Currency::write_balance(&caller, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); - (1..11).map(|i| { - // used to change block number - >::set_block_number(i.into()); - T::RelayBlockNumberProvider::set_block_number((2*i).into()); - assert_eq!(>::block_number(), i.into()); - assert_eq!(T::RelayBlockNumberProvider::current_block_number(), (2*i).into()); - PromototionPallet::::stake(RawOrigin::Signed(caller.clone()).into(), share * ::Currency::total_balance(&caller)) - }).collect::, _>>()?; + let _ = ::Currency::write_balance( + &caller, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + share * ::Currency::total_balance(&caller), + ); - } : _(RawOrigin::Signed(caller.clone())) + Ok(()) + } - unstake_partial { + #[benchmark] + fn unstake_all() -> Result<(), BenchmarkError> { let caller = account::("caller", 0, SEED); let share = Perbill::from_rational(1u32, 20); - let _ = ::Currency::write_balance(&caller, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); - (1..11).map(|i| { - // used to change block number - >::set_block_number(i.into()); - T::RelayBlockNumberProvider::set_block_number((2*i).into()); - assert_eq!(>::block_number(), i.into()); - assert_eq!(T::RelayBlockNumberProvider::current_block_number(), (2*i).into()); - PromototionPallet::::stake(RawOrigin::Signed(caller.clone()).into(), Into::>::into(100u128) * T::Nominal::get()) - }).collect::, _>>()?; + let _ = ::Currency::write_balance( + &caller, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); + (1..11) + .map(|i| { + // used to change block number + >::set_block_number(i.into()); + T::RelayBlockNumberProvider::set_block_number((2 * i).into()); + assert_eq!(>::block_number(), i.into()); + assert_eq!( + T::RelayBlockNumberProvider::current_block_number(), + (2 * i).into() + ); + PromototionPallet::::stake( + RawOrigin::Signed(caller.clone()).into(), + share * ::Currency::total_balance(&caller), + ) + }) + .collect::, _>>()?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + + Ok(()) + } - } : 
_(RawOrigin::Signed(caller.clone()), Into::>::into(1000u128) * T::Nominal::get()) + #[benchmark] + fn unstake_partial() -> Result<(), BenchmarkError> { + let caller = account::("caller", 0, SEED); + let _ = ::Currency::write_balance( + &caller, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); + (1..11) + .map(|i| { + // used to change block number + >::set_block_number(i.into()); + T::RelayBlockNumberProvider::set_block_number((2 * i).into()); + assert_eq!(>::block_number(), i.into()); + assert_eq!( + T::RelayBlockNumberProvider::current_block_number(), + (2 * i).into() + ); + PromototionPallet::::stake( + RawOrigin::Signed(caller.clone()).into(), + Into::>::into(100u128) * T::Nominal::get(), + ) + }) + .collect::, _>>()?; + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + Into::>::into(1000u128) * T::Nominal::get(), + ); + + Ok(()) + } - sponsor_collection { + #[benchmark] + fn sponsor_collection() -> Result<(), BenchmarkError> { let pallet_admin = account::("admin", 0, SEED); - PromototionPallet::::set_admin_address(RawOrigin::Root.into(), T::CrossAccountId::from_sub(pallet_admin.clone()))?; - let _ = ::Currency::write_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + PromototionPallet::::set_admin_address( + RawOrigin::Root.into(), + T::CrossAccountId::from_sub(pallet_admin.clone()), + )?; + let _ = ::Currency::write_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let caller: T::AccountId = account("caller", 0, SEED); - let _ = ::Currency::write_balance(&caller, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + let _ = ::Currency::write_balance( + &caller, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let collection = create_nft_collection::(caller)?; - } : _(RawOrigin::Signed(pallet_admin.clone()), collection) - stop_sponsoring_collection { + #[extrinsic_call] + _(RawOrigin::Signed(pallet_admin.clone()), collection); + + Ok(()) + } + + #[benchmark] + fn stop_sponsoring_collection() -> Result<(), BenchmarkError> { let pallet_admin = account::("admin", 0, SEED); - PromototionPallet::::set_admin_address(RawOrigin::Root.into(), T::CrossAccountId::from_sub(pallet_admin.clone()))?; - let _ = ::Currency::write_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + PromototionPallet::::set_admin_address( + RawOrigin::Root.into(), + T::CrossAccountId::from_sub(pallet_admin.clone()), + )?; + let _ = ::Currency::write_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let caller: T::AccountId = account("caller", 0, SEED); - let _ = ::Currency::write_balance(&caller, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + let _ = ::Currency::write_balance( + &caller, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let collection = create_nft_collection::(caller)?; - PromototionPallet::::sponsor_collection(RawOrigin::Signed(pallet_admin.clone()).into(), collection)?; - } : _(RawOrigin::Signed(pallet_admin.clone()), collection) + PromototionPallet::::sponsor_collection( + RawOrigin::Signed(pallet_admin.clone()).into(), + collection, + )?; - sponsor_contract { - let pallet_admin = account::("admin", 0, SEED); - PromototionPallet::::set_admin_address(RawOrigin::Root.into(), T::CrossAccountId::from_sub(pallet_admin.clone()))?; + #[extrinsic_call] + _(RawOrigin::Signed(pallet_admin.clone()), collection); + + Ok(()) + } - let _ = 
::Currency::write_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + #[benchmark] + fn sponsor_contract() -> Result<(), BenchmarkError> { + let pallet_admin = account::("admin", 0, SEED); + PromototionPallet::::set_admin_address( + RawOrigin::Root.into(), + T::CrossAccountId::from_sub(pallet_admin.clone()), + )?; + + let _ = ::Currency::write_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let address = H160::from_low_u64_be(SEED as u64); let data: Vec = (0..20).collect(); >::begin(RawOrigin::Root.into(), address)?; >::finish(RawOrigin::Root.into(), address, data)?; - } : _(RawOrigin::Signed(pallet_admin.clone()), address) - stop_sponsoring_contract { - let pallet_admin = account::("admin", 0, SEED); - PromototionPallet::::set_admin_address(RawOrigin::Root.into(), T::CrossAccountId::from_sub(pallet_admin.clone()))?; + #[extrinsic_call] + _(RawOrigin::Signed(pallet_admin.clone()), address); + + Ok(()) + } - let _ = ::Currency::write_balance(&pallet_admin, Perbill::from_rational(1u32, 2) * BalanceOf::::max_value()); + #[benchmark] + fn stop_sponsoring_contract() -> Result<(), BenchmarkError> { + let pallet_admin = account::("admin", 0, SEED); + PromototionPallet::::set_admin_address( + RawOrigin::Root.into(), + T::CrossAccountId::from_sub(pallet_admin.clone()), + )?; + + let _ = ::Currency::write_balance( + &pallet_admin, + Perbill::from_rational(1u32, 2) * BalanceOf::::max_value(), + ); let address = H160::from_low_u64_be(SEED as u64); let data: Vec = (0..20).collect(); >::begin(RawOrigin::Root.into(), address)?; >::finish(RawOrigin::Root.into(), address, data)?; - PromototionPallet::::sponsor_contract(RawOrigin::Signed(pallet_admin.clone()).into(), address)?; - } : _(RawOrigin::Signed(pallet_admin.clone()), address) + PromototionPallet::::sponsor_contract( + RawOrigin::Signed(pallet_admin.clone()).into(), + address, + )?; + + #[extrinsic_call] + _(RawOrigin::Signed(pallet_admin.clone()), address); + + Ok(()) + } } diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs index 43e38c5f0e..1d2edb8d37 100644 --- a/pallets/collator-selection/src/benchmarking.rs +++ b/pallets/collator-selection/src/benchmarking.rs @@ -32,7 +32,9 @@ //! Benchmarking setup for pallet-collator-selection -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::v2::{ + account, benchmarks, impl_benchmark_test_suite, whitelisted_caller, BenchmarkError, +}; use frame_support::{ assert_ok, traits::{ @@ -159,16 +161,17 @@ fn balance_unit() -> BalanceOf { /// Our benchmarking environment already has invulnerables registered. const INITIAL_INVULNERABLES: u32 = 2; -benchmarks! { - where_clause { where - T: Config + pallet_authorship::Config + session::Config - } +#[benchmarks(where T: Config + pallet_authorship::Config + session::Config)] +mod benchmarks { + use super::*; + const MAX_COLLATORS: u32 = 10; + const MAX_INVULNERABLES: u32 = MAX_COLLATORS - INITIAL_INVULNERABLES; // todo:collator this and all the following do not work for some reason, going all the way up to 10 in length // Both invulnerables and candidates count together against MaxCollators. // Maybe try putting it in braces? 1 .. (T::MaxCollators::get() - 2) - add_invulnerable { - let b in 1 .. 
T::MaxCollators::get() - INITIAL_INVULNERABLES - 1; + #[benchmark] + fn add_invulnerable(b: Linear<1, MAX_COLLATORS>) -> Result<(), BenchmarkError> { register_validators::(b); register_invulnerables::(b); @@ -181,39 +184,59 @@ benchmarks! { >::set_keys( RawOrigin::Signed(new_invulnerable.clone()).into(), keys::(b + 1), - Vec::new() - ).unwrap(); + Vec::new(), + ) + .unwrap(); let root_origin = T::UpdateOrigin::try_successful_origin().unwrap(); - }: { - assert_ok!( - >::add_invulnerable(root_origin, new_invulnerable.clone()) + + #[block] + { + assert_ok!(>::add_invulnerable( + root_origin, + new_invulnerable.clone() + )); + } + + assert_last_event::( + Event::InvulnerableAdded { + invulnerable: new_invulnerable, + } + .into(), ); - } - verify { - assert_last_event::(Event::InvulnerableAdded{invulnerable: new_invulnerable}.into()); + + Ok(()) } - remove_invulnerable { - let b in 1 .. T::MaxCollators::get() - INITIAL_INVULNERABLES - 1; + #[benchmark] + fn remove_invulnerable(b: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { register_validators::(b); register_invulnerables::(b); let root_origin = T::UpdateOrigin::try_successful_origin().unwrap(); let leaving = >::get().last().unwrap().clone(); whitelist!(leaving); - }: { - assert_ok!( - >::remove_invulnerable(root_origin, leaving.clone()) + + #[block] + { + assert_ok!(>::remove_invulnerable( + root_origin, + leaving.clone() + )); + } + + assert_last_event::( + Event::InvulnerableRemoved { + invulnerable: leaving, + } + .into(), ); - } - verify { - assert_last_event::(Event::InvulnerableRemoved{invulnerable: leaving}.into()); - } - get_license { - let c in 1 .. T::MaxCollators::get() - 1; + Ok(()) + } + #[benchmark] + fn get_license(c: Linear<1, MAX_COLLATORS>) -> Result<(), BenchmarkError> { register_validators::(c); get_licenses::(c); @@ -224,19 +247,28 @@ benchmarks! { >::set_keys( RawOrigin::Signed(caller.clone()).into(), keys::(c + 1), - Vec::new() - ).unwrap(); + Vec::new(), + ) + .unwrap(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + + assert_last_event::( + Event::LicenseObtained { + account_id: caller, + deposit: bond / 2u32.into(), + } + .into(), + ); - }: _(RawOrigin::Signed(caller.clone())) - verify { - assert_last_event::(Event::LicenseObtained{account_id: caller, deposit: bond / 2u32.into()}.into()); + Ok(()) } // worst case is when we have all the max-candidate slots filled except one, and we fill that // one. - onboard { - let c in 1 .. T::MaxCollators::get() - INITIAL_INVULNERABLES - 1; - + #[benchmark] + fn onboard(c: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { register_validators::(c); register_candidates::(c); @@ -246,37 +278,47 @@ benchmarks! { let origin = RawOrigin::Signed(caller.clone()); - >::set_keys( - origin.clone().into(), - keys::(c + 1), - Vec::new() - ).unwrap(); + >::set_keys(origin.clone().into(), keys::(c + 1), Vec::new()) + .unwrap(); - assert_ok!( - >::get_license(origin.clone().into()) - ); - }: _(origin) - verify { - assert_last_event::(Event::CandidateAdded{account_id: caller}.into()); + assert_ok!(>::get_license(origin.clone().into())); + + #[extrinsic_call] + _(origin); + + assert_last_event::(Event::CandidateAdded { account_id: caller }.into()); + + Ok(()) } // worst case is the last candidate leaving. - offboard { - let c in 1 .. 
T::MaxCollators::get() - INITIAL_INVULNERABLES; + #[benchmark] + fn offboard(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + let c = c + 1; register_validators::(c); register_candidates::(c); let leaving = >::get().last().unwrap().clone(); whitelist!(leaving); - }: _(RawOrigin::Signed(leaving.clone())) - verify { - assert_last_event::(Event::CandidateRemoved{account_id: leaving}.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(leaving.clone())); + + assert_last_event::( + Event::CandidateRemoved { + account_id: leaving, + } + .into(), + ); + + Ok(()) } // worst case is the last candidate leaving. - release_license { - let c in 1 .. T::MaxCollators::get() - INITIAL_INVULNERABLES; + #[benchmark] + fn release_license(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + let c = c + 1; let bond = balance_unit::(); register_validators::(c); @@ -284,14 +326,25 @@ benchmarks! { let leaving = >::get().last().unwrap().clone(); whitelist!(leaving); - }: _(RawOrigin::Signed(leaving.clone())) - verify { - assert_last_event::(Event::LicenseReleased{account_id: leaving, deposit_returned: bond}.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(leaving.clone())); + + assert_last_event::( + Event::LicenseReleased { + account_id: leaving, + deposit_returned: bond, + } + .into(), + ); + + Ok(()) } // worst case is the last candidate leaving. - force_release_license { - let c in 1 .. T::MaxCollators::get() - INITIAL_INVULNERABLES; + #[benchmark] + fn force_release_license(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + let c = c + 1; let bond = balance_unit::(); register_validators::(c); @@ -300,44 +353,65 @@ benchmarks! { let leaving = >::get().last().unwrap().clone(); whitelist!(leaving); let origin = T::UpdateOrigin::try_successful_origin().unwrap(); - }: { - assert_ok!( - >::force_release_license(origin, leaving.clone()) + + #[block] + { + assert_ok!(>::force_release_license( + origin, + leaving.clone() + )); + } + + assert_last_event::( + Event::LicenseReleased { + account_id: leaving, + deposit_returned: bond, + } + .into(), ); - } - verify { - assert_last_event::(Event::LicenseReleased{account_id: leaving, deposit_returned: bond}.into()); + + Ok(()) } // worst case is paying a non-existing candidate account. - note_author { + #[benchmark] + fn note_author() -> Result<(), BenchmarkError> { T::Currency::set_balance( &>::account_id(), balance_unit::() * 4u32.into(), ); let author = account("author", 0, SEED); - let new_block: BlockNumberFor= 10u32.into(); + let new_block: BlockNumberFor = 10u32.into(); frame_system::Pallet::::set_block_number(new_block); assert!(T::Currency::balance(&author) == 0u32.into()); - }: { - as EventHandler<_, _>>::note_author(author.clone()) - } verify { + + #[block] + { + as EventHandler<_, _>>::note_author(author.clone()); + } + assert!(T::Currency::balance(&author) > 0u32.into()); assert_eq!(frame_system::Pallet::::block_number(), new_block); + + Ok(()) } // worst case for new session. - new_session { - let r in 1 .. T::MaxCollators::get() - INITIAL_INVULNERABLES; - let c in 1 .. 
T::MaxCollators::get() - INITIAL_INVULNERABLES; + #[benchmark] + fn new_session( + r: Linear<0, MAX_INVULNERABLES>, + c: Linear<0, MAX_INVULNERABLES>, + ) -> Result<(), BenchmarkError> { + let r = r + 1; + let c = c + 1; frame_system::Pallet::::set_block_number(0u32.into()); register_validators::(c); register_candidates::(c); - let new_block: BlockNumberFor= 1800u32.into(); + let new_block: BlockNumberFor = 1800u32.into(); let zero_block: BlockNumberFor = 0u32.into(); let candidates = >::get(); @@ -362,19 +436,24 @@ benchmarks! { frame_system::Pallet::::set_block_number(new_block); assert!(>::get().len() == c as usize); - }: { - as SessionManager<_>>::new_session(0) - } verify { + + #[block] + { + as SessionManager<_>>::new_session(0); + } + if c > r { assert!(>::get().len() < pre_length); } else { assert!(>::get().len() == pre_length); } + + Ok(()) } -} -impl_benchmark_test_suite!( - CollatorSelection, - crate::mock::new_test_ext(), - crate::mock::Test, -); + impl_benchmark_test_suite!( + CollatorSelection, + crate::mock::new_test_ext(), + crate::mock::Test, + ); +} diff --git a/pallets/common/src/benchmarking.rs b/pallets/common/src/benchmarking.rs index 876fb3a095..1d013e552e 100644 --- a/pallets/common/src/benchmarking.rs +++ b/pallets/common/src/benchmarking.rs @@ -18,7 +18,7 @@ use core::convert::TryInto; -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::{account, v2::*}; use frame_support::{ pallet_prelude::ConstU32, traits::{fungible::Balanced, tokens::Precision, Get, Imbalance}, @@ -26,7 +26,7 @@ use frame_support::{ }; use pallet_evm::account::CrossAccountId; use sp_runtime::{traits::Zero, DispatchError}; -use sp_std::vec::Vec; +use sp_std::{vec, vec::Vec}; use up_data_structs::{ AccessMode, CollectionId, CollectionMode, CollectionPermissions, CreateCollectionData, NestingPermissions, PropertiesPermissionMap, Property, PropertyKey, PropertyValue, @@ -178,62 +178,103 @@ macro_rules! bench_init { () => {} } -benchmarks! { - set_collection_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_collection_properties( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let props = (0..b).map(|p| Property { - key: property_key(p as usize), - value: property_value(), - }).collect::>(); - }: {>::set_collection_properties(&collection, &owner, props.into_iter())?} - - delete_collection_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + let props = (0..b) + .map(|p| Property { + key: property_key(p as usize), + value: property_value(), + }) + .collect::>(); + + #[block] + { + >::set_collection_properties(&collection, &owner, props.into_iter())?; + } + + Ok(()) + } + + #[benchmark] + fn delete_collection_properties( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let props = (0..b).map(|p| Property { - key: property_key(p as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|p| Property { + key: property_key(p as usize), + value: property_value(), + }) + .collect::>(); >::set_collection_properties(&collection, &owner, props.into_iter())?; let to_delete = (0..b).map(|p| property_key(p as usize)).collect::>(); - }: {>::delete_collection_properties(&collection, &owner, to_delete.into_iter())?} - check_accesslist{ - bench_init!{ + #[block] + { + >::delete_collection_properties(&collection, &owner, to_delete.into_iter())?; + } + + Ok(()) + } + + #[benchmark] + fn check_accesslist() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); }; let mut collection_handle = >::try_get(collection.id)?; - >::update_permissions( - &sender, - &mut collection_handle, - CollectionPermissions { access: Some(AccessMode::AllowList), ..Default::default() } - )?; + >::update_permissions( + &sender, + &mut collection_handle, + CollectionPermissions { + access: Some(AccessMode::AllowList), + ..Default::default() + }, + )?; - >::toggle_allowlist( - &collection, - &sender, - &sender, - true, - )?; + >::toggle_allowlist(&collection, &sender, &sender, true)?; - assert_eq!(collection_handle.permissions.access(), AccessMode::AllowList); + assert_eq!( + collection_handle.permissions.access(), + AccessMode::AllowList + ); - }: {collection_handle.check_allowlist(&sender)?;} + #[block] + { + collection_handle.check_allowlist(&sender)?; + } + + Ok(()) + } - init_token_properties_common { - bench_init!{ + #[benchmark] + fn init_token_properties_common() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: sub; sender: cross_from_sub(sender); }; - }: {load_is_admin_and_property_permissions(&collection, &sender);} + + #[block] + { + load_is_admin_and_property_permissions(&collection, &sender); + } + + Ok(()) + } } diff --git a/pallets/configuration/src/benchmarking.rs b/pallets/configuration/src/benchmarking.rs index 69beb609a0..54c5e4a9bb 100644 --- a/pallets/configuration/src/benchmarking.rs +++ b/pallets/configuration/src/benchmarking.rs @@ -16,9 +16,10 @@ //! Benchmarking setup for pallet-configuration -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::{pallet_prelude::*, EventRecord, RawOrigin}; +use sp_std::vec; use super::*; @@ -30,66 +31,117 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -benchmarks! 
{ - where_clause { where +#[benchmarks( + where T: Config, T::Balance: From - } +)] +mod benchmarks { + use super::*; - set_weight_to_fee_coefficient_override { + #[benchmark] + fn set_weight_to_fee_coefficient_override() -> Result<(), BenchmarkError> { let coeff: u64 = 999; - }: { - assert_ok!( - >::set_weight_to_fee_coefficient_override(RawOrigin::Root.into(), Some(coeff)) - ); + + #[block] + { + assert_ok!(>::set_weight_to_fee_coefficient_override( + RawOrigin::Root.into(), + Some(coeff) + )); + } + + Ok(()) } - set_min_gas_price_override { + #[benchmark] + fn set_min_gas_price_override() -> Result<(), BenchmarkError> { let coeff: u64 = 999; - }: { - assert_ok!( - >::set_min_gas_price_override(RawOrigin::Root.into(), Some(coeff)) - ); + + #[block] + { + assert_ok!(>::set_min_gas_price_override( + RawOrigin::Root.into(), + Some(coeff) + )); + } + + Ok(()) } - set_app_promotion_configuration_override { + #[benchmark] + fn set_app_promotion_configuration_override() -> Result<(), BenchmarkError> { let configuration: AppPromotionConfiguration> = Default::default(); - }: { - assert_ok!( - >::set_app_promotion_configuration_override(RawOrigin::Root.into(), configuration) - ); + + #[block] + { + assert_ok!(>::set_app_promotion_configuration_override( + RawOrigin::Root.into(), + configuration + )); + } + + Ok(()) } - set_collator_selection_desired_collators { + #[benchmark] + fn set_collator_selection_desired_collators() -> Result<(), BenchmarkError> { let max: u32 = 999; - }: { - assert_ok!( - >::set_collator_selection_desired_collators(RawOrigin::Root.into(), Some(max)) + + #[block] + { + assert_ok!(>::set_collator_selection_desired_collators( + RawOrigin::Root.into(), + Some(max) + )); + } + + assert_last_event::( + Event::NewDesiredCollators { + desired_collators: Some(max), + } + .into(), ); - } - verify { - assert_last_event::(Event::NewDesiredCollators{desired_collators: Some(max)}.into()); + + Ok(()) } - set_collator_selection_license_bond { + #[benchmark] + fn set_collator_selection_license_bond() -> Result<(), BenchmarkError> { let bond_cost: Option = Some(1000u32.into()); - }: { - assert_ok!( - >::set_collator_selection_license_bond(RawOrigin::Root.into(), bond_cost) - ); - } - verify { - assert_last_event::(Event::NewCollatorLicenseBond{bond_cost}.into()); + + #[block] + { + assert_ok!(>::set_collator_selection_license_bond( + RawOrigin::Root.into(), + bond_cost + )); + } + + assert_last_event::(Event::NewCollatorLicenseBond { bond_cost }.into()); + + Ok(()) } - set_collator_selection_kick_threshold { + #[benchmark] + fn set_collator_selection_kick_threshold() -> Result<(), BenchmarkError> { let threshold: Option> = Some(900u32.into()); - }: { - assert_ok!( - >::set_collator_selection_kick_threshold(RawOrigin::Root.into(), threshold) + + #[block] + { + assert_ok!(>::set_collator_selection_kick_threshold( + RawOrigin::Root.into(), + threshold + )); + } + + assert_last_event::( + Event::NewCollatorKickThreshold { + length_in_blocks: threshold, + } + .into(), ); - } - verify { - assert_last_event::(Event::NewCollatorKickThreshold{length_in_blocks: threshold}.into()); + + Ok(()) } } diff --git a/pallets/evm-migration/src/benchmarking.rs b/pallets/evm-migration/src/benchmarking.rs index 49cf5e63e3..d3dc0d6b65 100644 --- a/pallets/evm-migration/src/benchmarking.rs +++ b/pallets/evm-migration/src/benchmarking.rs @@ -16,21 +16,29 @@ #![allow(missing_docs)] -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_core::{H160, H256}; use 
sp_std::{vec, vec::Vec}; use super::{Call, Config, Pallet}; -benchmarks! { - where_clause { where ::RuntimeEvent: parity_scale_codec::Encode } +#[benchmarks( + where ::RuntimeEvent: parity_scale_codec::Encode +)] +mod benchmarks { + use super::*; - begin { - }: _(RawOrigin::Root, H160::default()) + #[benchmark] + fn begin() -> Result<(), BenchmarkError> { + #[extrinsic_call] + _(RawOrigin::Root, H160::default()); - set_data { - let b in 0..80; + Ok(()) + } + + #[benchmark] + fn set_data(b: Linear<0, 80>) -> Result<(), BenchmarkError> { let address = H160::from_low_u64_be(b as u64); let mut data = Vec::new(); for i in 0..b { @@ -40,27 +48,51 @@ benchmarks! { )); } >::begin(RawOrigin::Root.into(), address)?; - }: _(RawOrigin::Root, address, data) - finish { - let b in 0..80; + #[extrinsic_call] + _(RawOrigin::Root, address, data); + + Ok(()) + } + + #[benchmark] + fn finish(b: Linear<0, 80>) -> Result<(), BenchmarkError> { let address = H160::from_low_u64_be(b as u64); let data: Vec = (0..b as u8).collect(); >::begin(RawOrigin::Root.into(), address)?; - }: _(RawOrigin::Root, address, data) - - insert_eth_logs { - let b in 0..200; - let logs = (0..b).map(|_| ethereum::Log { - address: H160([b as u8; 20]), - data: vec![b as u8; 128], - topics: vec![H256([b as u8; 32]); 6], - }).collect::>(); - }: _(RawOrigin::Root, logs) - - insert_events { - let b in 0..200; + + #[extrinsic_call] + _(RawOrigin::Root, address, data); + + Ok(()) + } + + #[benchmark] + fn insert_eth_logs(b: Linear<0, 200>) -> Result<(), BenchmarkError> { + let logs = (0..b) + .map(|_| ethereum::Log { + address: H160([b as u8; 20]), + data: vec![b as u8; 128], + topics: vec![H256([b as u8; 32]); 6], + }) + .collect::>(); + + #[extrinsic_call] + _(RawOrigin::Root, logs); + + Ok(()) + } + + #[benchmark] + fn insert_events(b: Linear<0, 200>) -> Result<(), BenchmarkError> { use parity_scale_codec::Encode; - let logs = (0..b).map(|_| ::RuntimeEvent::from(crate::Event::::TestEvent).encode()).collect::>(); - }: _(RawOrigin::Root, logs) + let logs = (0..b) + .map(|_| ::RuntimeEvent::from(crate::Event::::TestEvent).encode()) + .collect::>(); + + #[extrinsic_call] + _(RawOrigin::Root, logs); + + Ok(()) + } } diff --git a/pallets/foreign-assets/src/benchmarking.rs b/pallets/foreign-assets/src/benchmarking.rs index 93bbba3085..f8bbbb4519 100644 --- a/pallets/foreign-assets/src/benchmarking.rs +++ b/pallets/foreign-assets/src/benchmarking.rs @@ -16,10 +16,10 @@ #![allow(missing_docs)] -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::{account, v2::*}; use frame_support::traits::Currency; use frame_system::RawOrigin; -use sp_std::{boxed::Box, vec::Vec}; +use sp_std::{boxed::Box, vec, vec::Vec}; use staging_xcm::{opaque::latest::Junction::Parachain, v3::Junctions::X1, VersionedMultiLocation}; use super::{Call, Config, Pallet}; @@ -31,42 +31,74 @@ fn bounded>>(slice: &[u8]) -> T { .unwrap() } -benchmarks! 
{ - register_foreign_asset { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn register_foreign_asset() -> Result<(), BenchmarkError> { let owner: T::AccountId = account("user", 0, 1); let location: VersionedMultiLocation = VersionedMultiLocation::from(X1(Parachain(1000))); - let metadata: AssetMetadata<<::Currency as Currency<::AccountId>>::Balance> = AssetMetadata{ + let metadata: AssetMetadata< + <::Currency as Currency<::AccountId>>::Balance, + > = AssetMetadata { name: bounded(b"name"), symbol: bounded(b"symbol"), decimals: 18, - minimal_balance: 1u32.into() + minimal_balance: 1u32.into(), }; - let mut balance: <::Currency as Currency<::AccountId>>::Balance = - 4_000_000_000u32.into(); + let mut balance: <::Currency as Currency< + ::AccountId, + >>::Balance = 4_000_000_000u32.into(); balance = balance * balance; - ::Currency::make_free_balance_be(&owner, - balance); - }: _(RawOrigin::Root, owner, Box::new(location), Box::new(metadata)) + ::Currency::make_free_balance_be(&owner, balance); + + #[extrinsic_call] + _( + RawOrigin::Root, + owner, + Box::new(location), + Box::new(metadata), + ); - update_foreign_asset { + Ok(()) + } + + #[benchmark] + fn update_foreign_asset() -> Result<(), BenchmarkError> { let owner: T::AccountId = account("user", 0, 1); let location: VersionedMultiLocation = VersionedMultiLocation::from(X1(Parachain(2000))); - let metadata: AssetMetadata<<::Currency as Currency<::AccountId>>::Balance> = AssetMetadata{ + let metadata: AssetMetadata< + <::Currency as Currency<::AccountId>>::Balance, + > = AssetMetadata { name: bounded(b"name"), symbol: bounded(b"symbol"), decimals: 18, - minimal_balance: 1u32.into() + minimal_balance: 1u32.into(), }; - let metadata2: AssetMetadata<<::Currency as Currency<::AccountId>>::Balance> = AssetMetadata{ + let metadata2: AssetMetadata< + <::Currency as Currency<::AccountId>>::Balance, + > = AssetMetadata { name: bounded(b"name2"), symbol: bounded(b"symbol2"), decimals: 18, - minimal_balance: 1u32.into() + minimal_balance: 1u32.into(), }; - let mut balance: <::Currency as Currency<::AccountId>>::Balance = - 4_000_000_000u32.into(); + let mut balance: <::Currency as Currency< + ::AccountId, + >>::Balance = 4_000_000_000u32.into(); balance = balance * balance; ::Currency::make_free_balance_be(&owner, balance); - Pallet::::register_foreign_asset(RawOrigin::Root.into(), owner, Box::new(location.clone()), Box::new(metadata))?; - }: _(RawOrigin::Root, 0, Box::new(location), Box::new(metadata2)) + Pallet::::register_foreign_asset( + RawOrigin::Root.into(), + owner, + Box::new(location.clone()), + Box::new(metadata), + )?; + + #[extrinsic_call] + _(RawOrigin::Root, 0, Box::new(location), Box::new(metadata2)); + + Ok(()) + } } diff --git a/pallets/fungible/src/benchmarking.rs b/pallets/fungible/src/benchmarking.rs index ea03bae74b..a9c3a5cdaa 100644 --- a/pallets/fungible/src/benchmarking.rs +++ b/pallets/fungible/src/benchmarking.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::{account, v2::*}; use pallet_common::{bench_init, benchmarking::create_collection_raw}; use sp_std::prelude::*; use up_data_structs::{budget::Unlimited, CollectionMode, MAX_ITEMS_PER_BATCH}; @@ -35,83 +35,159 @@ fn create_collection( ) } -benchmarks! 
{ - create_item { - bench_init!{ +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create_item() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); to: cross_sub; }; - }: {>::create_item(&collection, &sender, (to, 200), &Unlimited)?} - create_multiple_items_ex { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + #[block] + { + >::create_item(&collection, &sender, (to, 200), &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn create_multiple_items_ex(b: Linear<0, MAX_ITEMS_PER_BATCH>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); }; - let data = (0..b).map(|i| { - bench_init!(to: cross_sub(i);); - (to, 200) - }).collect::>(); - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} - - burn_item { - bench_init!{ + let data = (0..b) + .map(|i| { + bench_init!(to: cross_sub(i);); + (to, 200) + }) + .collect::>(); + + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn burn_item() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; burner: cross_sub; }; >::create_item(&collection, &owner, (burner.clone(), 200), &Unlimited)?; - }: {>::burn(&collection, &burner, 100)?} - transfer_raw { - bench_init!{ + #[block] + { + >::burn(&collection, &burner, 100)?; + } + + Ok(()) + } + + #[benchmark] + fn transfer_raw() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; to: cross_sub; }; >::create_item(&collection, &owner, (sender.clone(), 200), &Unlimited)?; - }: {>::transfer(&collection, &sender, &to, 200, &Unlimited)?} - approve { - bench_init!{ + #[block] + { + >::transfer(&collection, &sender, &to, 200, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn approve() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; >::create_item(&collection, &owner, (sender.clone(), 200), &Unlimited)?; - }: {>::set_allowance(&collection, &sender, &spender, 100)?} - approve_from { - bench_init!{ + #[block] + { + >::set_allowance(&collection, &sender, &spender, 100)?; + } + + Ok(()) + } + + #[benchmark] + fn approve_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let owner_eth = T::CrossAccountId::from_eth(*sender.as_eth()); >::create_item(&collection, &owner, (owner_eth.clone(), 200), &Unlimited)?; - }: {>::set_allowance_from(&collection, &sender, &owner_eth, &spender, 100)?} - check_allowed_raw { - bench_init!{ + #[block] + { + >::set_allowance_from(&collection, &sender, &owner_eth, &spender, 100)?; + } + + Ok(()) + } + + #[benchmark] + fn check_allowed_raw() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; >::create_item(&collection, &owner, (sender.clone(), 200), &Unlimited)?; >::set_allowance(&collection, &sender, &spender, 200)?; - }: {>::check_allowed(&collection, &spender, &sender, 200, &Unlimited)?;} - set_allowance_unchecked_raw { - bench_init!{ + #[block] + { + >::check_allowed(&collection, &spender, &sender, 200, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn set_allowance_unchecked_raw() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; >::create_item(&collection, &owner, (sender.clone(), 200), &Unlimited)?; - }: {>::set_allowance_unchecked(&collection, &sender, &spender, 200);} - burn_from { - bench_init!{ + #[block] + { + >::set_allowance_unchecked(&collection, &sender, &spender, 200); + } + + Ok(()) + } + + #[benchmark] + fn burn_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; burner: cross_sub; }; >::create_item(&collection, &owner, (sender.clone(), 200), &Unlimited)?; >::set_allowance(&collection, &sender, &burner, 200)?; - }: {>::burn_from(&collection, &burner, &sender, 100, &Unlimited)?} + + #[block] + { + >::burn_from(&collection, &burner, &sender, 100, &Unlimited)? + } + + Ok(()) + } } diff --git a/pallets/identity/src/benchmarking.rs b/pallets/identity/src/benchmarking.rs index 62091e743f..647a7c23ec 100644 --- a/pallets/identity/src/benchmarking.rs +++ b/pallets/identity/src/benchmarking.rs @@ -37,11 +37,8 @@ #![cfg(feature = "runtime-benchmarks")] #![allow(clippy::no_effect)] -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; -use frame_support::{ - assert_ok, ensure, - traits::{EnsureOrigin, Get}, -}; +use frame_benchmarking::v2::*; +use frame_support::{assert_ok, ensure, traits::EnsureOrigin}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -145,25 +142,48 @@ fn balance_unit() -> >::Balance 200u32.into() } -benchmarks! { - add_registrar { - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); +#[benchmarks] +mod benchmarks { + use super::*; + + const MAX_REGISTRARS: u32 = 20; + const MAX_ADDITIONAL_FIELDS: u32 = 100; + const MAX_SUB_ACCOUNTS: u32 = 100; + + #[benchmark] + fn add_registrar(r: Linear<2, MAX_REGISTRARS>) -> Result<(), BenchmarkError> { + let r = r - 1; + add_registrars::(r)?; + ensure!( + Registrars::::get().len() as u32 == r, + "Registrars not set up correctly." + ); let origin = T::RegistrarOrigin::try_successful_origin().unwrap(); let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); - }: _(origin, account) - verify { - ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, account); + + ensure!( + Registrars::::get().len() as u32 == r + 1, + "Registrars not added." + ); + + Ok(()) } - set_identity { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. 
T::MaxAdditionalFields::get(); + #[benchmark] + fn set_identity( + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + r: Linear<1, MAX_REGISTRARS>, + ) -> Result<(), BenchmarkError> { + add_registrars::(r)?; let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin: ::RuntimeOrigin = + RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity @@ -173,8 +193,7 @@ benchmarks! { // User requests judgement from all the registrars, and they approve for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let registrar_lookup = T::Lookup::unlookup(registrar.clone()); - let balance_to_use = balance_unit::() * 10u32.into(); + let balance_to_use = balance_unit::() * 10u32.into(); let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; @@ -188,66 +207,91 @@ benchmarks! { } caller }; - }: _(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::(x))) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + Box::new(create_identity_info::(x)), + ); + assert_last_event::(Event::::IdentitySet { who: caller }.into()); + + Ok(()) } // We need to split `set_subs` into two benchmarks to accurately isolate the potential // writes caused by new or old sub accounts. The actual weight should simply be // the sum of these two weights. - set_subs_new { + #[benchmark] + fn set_subs_new(s: Linear<0, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); // Create a new subs vec with s sub accounts - let s in 0 .. T::MaxSubAccounts::get() => (); let subs = create_sub_accounts::(&caller, s)?; - ensure!(SubsOf::::get(&caller).1.len() == 0, "Caller already has subs"); - }: set_subs(RawOrigin::Signed(caller.clone()), subs) - verify { - ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not added"); + ensure!( + SubsOf::::get(&caller).1.len() == 0, + "Caller already has subs" + ); + + #[extrinsic_call] + set_subs(RawOrigin::Signed(caller.clone()), subs); + + ensure!( + SubsOf::::get(&caller).1.len() as u32 == s, + "Subs not added" + ); + + Ok(()) } - set_subs_old { + #[benchmark] + fn set_subs_old(p: Linear<0, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); // Give them p many previous sub accounts. - let p in 0 .. T::MaxSubAccounts::get() => { - let _ = add_sub_accounts::(&caller, p)?; - }; + let _ = add_sub_accounts::(&caller, p)?; // Remove all subs. 
let subs = create_sub_accounts::(&caller, 0)?; ensure!( SubsOf::::get(&caller).1.len() as u32 == p, "Caller does have subs", ); - }: set_subs(RawOrigin::Signed(caller.clone()), subs) - verify { + + #[extrinsic_call] + set_subs(RawOrigin::Signed(caller.clone()), subs); + ensure!(SubsOf::::get(&caller).1.len() == 0, "Subs not removed"); + + Ok(()) } - clear_identity { + #[benchmark] + fn clear_identity( + r: Linear<1, MAX_REGISTRARS>, + s: Linear<0, MAX_SUB_ACCOUNTS>, + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + ) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get() => { + add_registrars::(r)?; + + { // Give them s many sub accounts let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; - }; - let x in 0 .. T::MaxAdditionalFields::get(); + } // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); Identity::::set_identity(caller_origin.clone(), Box::new(info.clone()))?; // User requests judgement from all the registrars, and they approve for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = balance_unit::() * 10u32.into(); + let balance_to_use = balance_unit::() * 10u32.into(); let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; @@ -259,108 +303,199 @@ benchmarks! { T::Hashing::hash_of(&info), )?; } - ensure!(IdentityOf::::contains_key(&caller), "Identity does not exist."); - }: _(RawOrigin::Signed(caller.clone())) - verify { - ensure!(!IdentityOf::::contains_key(&caller), "Identity not cleared."); + ensure!( + IdentityOf::::contains_key(&caller), + "Identity does not exist." + ); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + + ensure!( + !IdentityOf::::contains_key(&caller), + "Identity not cleared." + ); + + Ok(()) } - request_judgement { + #[benchmark] + fn request_judgement( + r: Linear<1, MAX_REGISTRARS>, + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + ) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. 
T::MaxAdditionalFields::get() => { + add_registrars::(r)?; + + { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; - }; - }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) - verify { - assert_last_event::(Event::::JudgementRequested { who: caller, registrar_index: r-1 }.into()); + } + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()); + + assert_last_event::( + Event::::JudgementRequested { + who: caller, + registrar_index: r - 1, + } + .into(), + ); + + Ok(()) } - cancel_request { + #[benchmark] + fn cancel_request( + r: Linear<1, MAX_REGISTRARS>, + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + ) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get() => { + add_registrars::(r)?; + + { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let caller_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; - }; + } Identity::::request_judgement(caller_origin, r - 1, 10u32.into())?; - }: _(RawOrigin::Signed(caller.clone()), r - 1) - verify { - assert_last_event::(Event::::JudgementUnrequested { who: caller, registrar_index: r-1 }.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), r - 1); + + assert_last_event::( + Event::::JudgementUnrequested { + who: caller, + registrar_index: r - 1, + } + .into(), + ); + + Ok(()) } - set_fee { + #[benchmark] + fn set_fee(r: Linear<2, MAX_REGISTRARS>) -> Result<(), BenchmarkError> { + let r = r - 1; let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin().unwrap(); Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); - }: _(RawOrigin::Signed(caller), r, 100u32.into()) - verify { + ensure!( + registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), + "Fee already set." + ); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, 100u32.into()); + let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), "Fee not changed."); + ensure!( + registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), + "Fee not changed." 
+ ); + + Ok(()) } - set_account_id { + #[benchmark] + fn set_account_id(r: Linear<2, MAX_REGISTRARS>) -> Result<(), BenchmarkError> { + let r = r - 1; let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin().unwrap(); Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); + ensure!( + registrars[r as usize].as_ref().unwrap().account == caller, + "id not set." + ); let new_account = T::Lookup::unlookup(account("new", 0, SEED)); - }: _(RawOrigin::Signed(caller), r, new_account) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, new_account); + let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); + ensure!( + registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), + "id not changed." + ); + + Ok(()) } - set_fields { + #[benchmark] + fn set_fields(r: Linear<2, MAX_REGISTRARS>) -> Result<(), BenchmarkError> { + let r = r - 1; let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; + add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::try_successful_origin().unwrap(); Identity::::add_registrar(registrar_origin, caller_lookup)?; let fields = IdentityFields( - IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot - | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter + IdentityField::Display + | IdentityField::Legal + | IdentityField::Web + | IdentityField::Riot + | IdentityField::Email + | IdentityField::PgpFingerprint + | IdentityField::Image + | IdentityField::Twitter, ); let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fields == Default::default(), "fields already set."); - }: _(RawOrigin::Signed(caller), r, fields) - verify { + ensure!( + registrars[r as usize].as_ref().unwrap().fields == Default::default(), + "fields already set." + ); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), r, fields); + let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fields != Default::default(), "fields not set."); + ensure!( + registrars[r as usize].as_ref().unwrap().fields != Default::default(), + "fields not set." + ); + + Ok(()) } - provide_judgement { + #[benchmark] + fn provide_judgement( + r: Linear<2, MAX_REGISTRARS>, + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + ) -> Result<(), BenchmarkError> { + let r = r - 1; // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); + let user_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -368,8 +503,7 @@ benchmarks! 
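// In the v2 syntax a component range such as `let r in 1 .. T::MaxRegistrars::get() - 1`
// becomes a `Linear<A, B>` argument whose bounds are `u32` const generics, so the
// set_fee / set_account_id / set_fields hunks here take `r: Linear<2, MAX_REGISTRARS>`
// and shift the value back down with `let r = r - 1;`. A condensed restatement of the
// set_fee case, with the elided generic parameters assumed to be `<T>`:
#[benchmark]
fn set_fee(r: Linear<2, MAX_REGISTRARS>) -> Result<(), BenchmarkError> {
    let r = r - 1; // the macro drives `r` over the full Linear range; shift it back down
    let caller: T::AccountId = whitelisted_caller();
    let caller_lookup = T::Lookup::unlookup(caller.clone());
    add_registrars::<T>(r)?;
    let registrar_origin = T::RegistrarOrigin::try_successful_origin().unwrap();
    Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;

    #[extrinsic_call]
    _(RawOrigin::Signed(caller), r, 100u32.into());

    // the checks that used to sit in `verify { .. }` follow here, as in the hunk above
    Ok(())
}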
{ let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let x in 0 .. T::MaxAdditionalFields::get(); + add_registrars::(r)?; let info = create_identity_info::(x); let info_hash = T::Hashing::hash_of(&info); @@ -378,18 +512,38 @@ benchmarks! { let registrar_origin = T::RegistrarOrigin::try_successful_origin().unwrap(); Identity::::add_registrar(registrar_origin, caller_lookup)?; Identity::::request_judgement(user_origin, r, 10u32.into())?; - }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash) - verify { - assert_last_event::(Event::::JudgementGiven { target: user, registrar_index: r }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + r, + user_lookup, + Judgement::Reasonable, + info_hash, + ); + + assert_last_event::( + Event::::JudgementGiven { + target: user, + registrar_index: r, + } + .into(), + ); + + Ok(()) } - kill_identity { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 0 .. T::MaxSubAccounts::get(); - let x in 0 .. T::MaxAdditionalFields::get(); + #[benchmark] + fn kill_identity( + r: Linear<1, MAX_REGISTRARS>, + s: Linear<0, MAX_SUB_ACCOUNTS>, + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + ) -> Result<(), BenchmarkError> { + add_registrars::(r)?; let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::RuntimeOrigin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::RuntimeOrigin = + RawOrigin::Signed(target.clone()).into(); let target_lookup = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); @@ -400,7 +554,7 @@ benchmarks! { // User requests judgement from all the registrars, and they approve for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); - let balance_to_use = balance_unit::() * 10u32.into(); + let balance_to_use = balance_unit::() * 10u32.into(); let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); Identity::::request_judgement(target_origin.clone(), i, 10u32.into())?; @@ -414,110 +568,190 @@ benchmarks! { } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); let origin = T::ForceOrigin::try_successful_origin().unwrap(); - }: _(origin, target_lookup) - verify { - ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target_lookup); + + ensure!( + !IdentityOf::::contains_key(&target), + "Identity not removed" + ); + + Ok(()) } - force_insert_identities { - let x in 0 .. T::MaxAdditionalFields::get(); - let n in 0..600; + #[benchmark] + fn force_insert_identities( + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + n: Linear<0, 600>, + ) -> Result<(), BenchmarkError> { use frame_benchmarking::account; - let identities = (0..n).map(|i| ( - account("caller", i, SEED), - Registration::, T::MaxRegistrars, T::MaxAdditionalFields> { - judgements: Default::default(), - deposit: Default::default(), - info: create_identity_info::(x), - }, - )).collect::>(); + let identities = (0..n) + .map(|i| { + ( + account("caller", i, SEED), + Registration::, T::MaxRegistrars, T::MaxAdditionalFields> { + judgements: Default::default(), + deposit: Default::default(), + info: create_identity_info::(x), + }, + ) + }) + .collect::>(); let origin = T::ForceOrigin::try_successful_origin().unwrap(); - }: _(origin, identities) - force_remove_identities { - let x in 0 .. 
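// Benchmarks that dispatch from a configured `EnsureOrigin` (here `T::ForceOrigin`)
// build the origin with `try_successful_origin()` and annotate it in the call as
// `origin as T::RuntimeOrigin`, as the kill_identity hunk just above does. The
// smallest shape of that pattern looks roughly like this; `force_do` is a
// stand-in name, not an extrinsic from this patch:
#[benchmark]
fn force_do() -> Result<(), BenchmarkError> {
    let origin = T::ForceOrigin::try_successful_origin().unwrap();

    #[extrinsic_call]
    _(origin as T::RuntimeOrigin);

    Ok(())
}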
T::MaxAdditionalFields::get(); - let n in 0..600; + #[extrinsic_call] + _(origin as T::RuntimeOrigin, identities); + + Ok(()) + } + + #[benchmark] + fn force_remove_identities( + x: Linear<0, MAX_ADDITIONAL_FIELDS>, + n: Linear<0, 600>, + ) -> Result<(), BenchmarkError> { use frame_benchmarking::account; let origin = T::ForceOrigin::try_successful_origin().unwrap(); - let identities = (0..n).map(|i| ( - account("caller", i, SEED), - Registration::, T::MaxRegistrars, T::MaxAdditionalFields> { - judgements: Default::default(), - deposit: Default::default(), - info: create_identity_info::(x), - }, - )).collect::>(); - assert_ok!( - Identity::::force_insert_identities(origin.clone(), identities.clone()), - ); - let identities = identities.into_iter().map(|(acc, _)| acc).collect::>(); - }: _(origin, identities) + let identities = (0..n) + .map(|i| { + ( + account("caller", i, SEED), + Registration::, T::MaxRegistrars, T::MaxAdditionalFields> { + judgements: Default::default(), + deposit: Default::default(), + info: create_identity_info::(x), + }, + ) + }) + .collect::>(); + assert_ok!(Identity::::force_insert_identities( + origin.clone(), + identities.clone() + ),); + let identities = identities + .into_iter() + .map(|(acc, _)| acc) + .collect::>(); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, identities); + + Ok(()) + } - force_set_subs { - let s in 0 .. T::MaxSubAccounts::get(); - let n in 0..600; + #[benchmark] + fn force_set_subs( + s: Linear<0, MAX_SUB_ACCOUNTS>, + n: Linear<0, 600>, + ) -> Result<(), BenchmarkError> { use frame_benchmarking::account; - let identities = (0..n).map(|i| { - let caller: T::AccountId = account("caller", i, SEED); - ( - caller.clone(), + let identities = (0..n) + .map(|i| { + let caller: T::AccountId = account("caller", i, SEED); ( - BalanceOf::::max_value(), - create_sub_accounts::(&caller, s).unwrap().try_into().unwrap(), - ), - ) - }).collect::>(); + caller.clone(), + ( + BalanceOf::::max_value(), + create_sub_accounts::(&caller, s) + .unwrap() + .try_into() + .unwrap(), + ), + ) + }) + .collect::>(); let origin = T::ForceOrigin::try_successful_origin().unwrap(); - }: _(origin, identities) - add_sub { - let s in 0 .. T::MaxSubAccounts::get() - 1; + #[extrinsic_call] + _(origin as T::RuntimeOrigin, identities); + + Ok(()) + } + #[benchmark] + fn add_sub(s: Linear<1, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { + let s = s - 1; let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; let sub = account("new_sub", 0, SEED); let data = Data::Raw(vec![0; 32].try_into().unwrap()); - ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not set."); - }: _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data) - verify { - ensure!(SubsOf::::get(&caller).1.len() as u32 == s + 1, "Subs not added."); - } + ensure!( + SubsOf::::get(&caller).1.len() as u32 == s, + "Subs not set." + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + T::Lookup::unlookup(sub), + data, + ); + + ensure!( + SubsOf::::get(&caller).1.len() as u32 == s + 1, + "Subs not added." + ); - rename_sub { - let s in 1 .. 
T::MaxSubAccounts::get(); + Ok(()) + } + #[benchmark] + fn rename_sub(s: Linear<1, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); let data = Data::Raw(vec![1; 32].try_into().unwrap()); - ensure!(SuperOf::::get(&sub).unwrap().1 != data, "data already set"); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone()) - verify { + ensure!( + SuperOf::::get(&sub).unwrap().1 != data, + "data already set" + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + T::Lookup::unlookup(sub.clone()), + data.clone(), + ); + ensure!(SuperOf::::get(&sub).unwrap().1 == data, "data not set"); - } - remove_sub { - let s in 1 .. T::MaxSubAccounts::get(); + Ok(()) + } + #[benchmark] + fn remove_sub(s: Linear<1, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); ensure!(SuperOf::::contains_key(&sub), "Sub doesn't exists"); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone())); + ensure!(!SuperOf::::contains_key(&sub), "Sub not removed"); - } - quit_sub { - let s in 0 .. T::MaxSubAccounts::get() - 1; + Ok(()) + } + #[benchmark] + fn quit_sub(s: Linear<1, MAX_SUB_ACCOUNTS>) -> Result<(), BenchmarkError> { + let s = s - 1; let caller: T::AccountId = whitelisted_caller(); let sup = account("super", 0, SEED); let _ = add_sub_accounts::(&sup, s)?; let sup_origin = RawOrigin::Signed(sup).into(); - Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32].try_into().unwrap()))?; + Identity::::add_sub( + sup_origin, + T::Lookup::unlookup(caller.clone()), + Data::Raw(vec![0; 32].try_into().unwrap()), + )?; ensure!(SuperOf::::contains_key(&caller), "Sub doesn't exists"); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + ensure!(!SuperOf::::contains_key(&caller), "Sub not removed"); + + Ok(()) } impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/pallets/inflation/src/benchmarking.rs b/pallets/inflation/src/benchmarking.rs index 32ef928e06..4632c8e2a5 100644 --- a/pallets/inflation/src/benchmarking.rs +++ b/pallets/inflation/src/benchmarking.rs @@ -16,18 +16,29 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::benchmarks; -use frame_support::{pallet_prelude::*, traits::Hooks}; +use frame_benchmarking::v2::*; +use frame_support::traits::Hooks; +use sp_std::vec; use super::*; use crate::Pallet as Inflation; -benchmarks! 
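// Every pallet below gets the same file-level rewrite as this inflation file: the
// `benchmarks! { .. }` block becomes a `#[benchmarks]` module importing the v2
// prelude, each case is an ordinary function returning `Result<(), BenchmarkError>`,
// `_` in `#[extrinsic_call]` stands for the extrinsic named after the benchmark
// function (a differently named extrinsic, like `set_subs` earlier in this patch,
// is written out explicitly), and the old `verify` block becomes plain code after
// the call. A condensed skeleton of that shape, using placeholder pallet and
// mock-runtime names rather than code from this repository:
#![cfg(feature = "runtime-benchmarks")]

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

use super::*;
use crate::Pallet as Example;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn some_extrinsic() -> Result<(), BenchmarkError> {
        let caller: T::AccountId = whitelisted_caller();

        #[extrinsic_call]
        _(RawOrigin::Signed(caller));

        // post-conditions that used to live in `verify { .. }` go here
        Ok(())
    }

    impl_benchmark_test_suite!(Example, crate::mock::new_test_ext(), crate::mock::Test);
}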
{ +#[benchmarks] +mod benchmarks { + use super::*; - on_initialize { + #[benchmark] + fn on_initialize() -> Result<(), BenchmarkError> { let block1: BlockNumberFor = 1u32.into(); let block2: BlockNumberFor = 2u32.into(); as Hooks>::on_initialize(block1); // Create Treasury account - }: { as Hooks>::on_initialize(block2); } // Benchmark deposit_into_existing path + #[block] + { + as Hooks>::on_initialize(block2); + // Benchmark deposit_into_existing path + } + + Ok(()) + } } diff --git a/pallets/maintenance/src/benchmarking.rs b/pallets/maintenance/src/benchmarking.rs index 2f26928355..4dba0cf6fb 100644 --- a/pallets/maintenance/src/benchmarking.rs +++ b/pallets/maintenance/src/benchmarking.rs @@ -14,36 +14,60 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_support::{ensure, pallet_prelude::Weight, traits::StorePreimage}; use frame_system::RawOrigin; use parity_scale_codec::Encode; +use sp_std::vec; use super::*; use crate::{Config, Pallet as Maintenance}; -benchmarks! { - enable { - }: _(RawOrigin::Root) - verify { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn enable() -> Result<(), BenchmarkError> { + #[extrinsic_call] + _(RawOrigin::Root); + ensure!(>::get(), "didn't enable the MM"); + + Ok(()) } - disable { + #[benchmark] + fn disable() -> Result<(), BenchmarkError> { Maintenance::::enable(RawOrigin::Root.into())?; - }: _(RawOrigin::Root) - verify { + + #[extrinsic_call] + _(RawOrigin::Root); + ensure!(!>::get(), "didn't disable the MM"); + + Ok(()) } - #[pov_mode = MaxEncodedLen { - // PoV size is deducted from weight_bound - Preimage::PreimageFor: Measured - }] - execute_preimage { - let call = ::RuntimeCall::from(frame_system::Call::::remark { remark: 1u32.encode() }); + // TODO: fix + // #[pov_mode = MaxEncodedLen { + // // PoV size is deducted from weight_bound + // Preimage::PreimageFor: Measured + // }] + #[benchmark] + fn execute_preimage() -> Result<(), BenchmarkError> { + let call = ::RuntimeCall::from(frame_system::Call::::remark { + remark: 1u32.encode(), + }); let hash = T::Preimages::note(call.encode().into())?; - }: _(RawOrigin::Root, hash, Weight::from_parts(100000000000, 100000000000)) - verify { + + #[extrinsic_call] + _( + RawOrigin::Root, + hash, + Weight::from_parts(100000000000, 100000000000), + ); + + Ok(()) } } diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 8584aab31d..f3c2731163 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::v2::{account, benchmarks, BenchmarkError}; use pallet_common::{ bench_init, benchmarking::{ @@ -64,236 +64,437 @@ fn create_collection( ) } -benchmarks! { - create_item { - bench_init!{ +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create_item() -> Result<(), BenchmarkError> { + bench_init! 
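// Where the old DSL measured an arbitrary block of code (`}: { .. }`) rather than a
// dispatchable, the v2 form wraps that code in `#[block]` instead of
// `#[extrinsic_call]`, which is what the inflation `on_initialize` case above and
// the nonfungible/refungible cases below do. Rough shape only; `prepare` and
// `do_expensive_thing` are stand-ins, not items from this repository:
#[benchmark]
fn expensive_path() -> Result<(), BenchmarkError> {
    prepare::<T>()?; // setup runs outside the measured block

    #[block]
    {
        // only this scope is timed
        Pallet::<T>::do_expensive_thing();
    }

    Ok(())
}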
{ owner: sub; collection: collection(owner); sender: cross_from_sub(owner); to: cross_sub; }; - }: {create_max_item(&collection, &sender, to.clone())?} - create_multiple_items { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + #[block] + { + create_max_item(&collection, &sender, to.clone())?; + } + + Ok(()) + } + + #[benchmark] + fn create_multiple_items(b: Linear<0, MAX_ITEMS_PER_BATCH>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); to: cross_sub; }; - let data = (0..b).map(|_| create_max_item_data::(to.clone())).collect(); - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} + let data = (0..b) + .map(|_| create_max_item_data::(to.clone())) + .collect(); - create_multiple_items_ex { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn create_multiple_items_ex(b: Linear<0, MAX_ITEMS_PER_BATCH>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); }; - let data = (0..b).map(|i| { - bench_init!(to: cross_sub(i);); - create_max_item_data::(to) - }).collect(); - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} - - burn_item { - bench_init!{ + let data = (0..b) + .map(|i| { + bench_init!(to: cross_sub(i);); + create_max_item_data::(to) + }) + .collect(); + + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn burn_item() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); burner: cross_sub; }; let item = create_max_item(&collection, &sender, burner.clone())?; - }: {>::burn(&collection, &burner, item)?} - burn_recursively_self_raw { - bench_init!{ + #[block] + { + >::burn(&collection, &burner, item)?; + } + + Ok(()) + } + + #[benchmark] + fn burn_recursively_self_raw() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); burner: cross_sub; }; let item = create_max_item(&collection, &sender, burner.clone())?; - }: {>::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?} - burn_recursively_breadth_plus_self_plus_self_per_each_raw { - let b in 0..200; - bench_init!{ + #[block] + { + >::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn burn_recursively_breadth_plus_self_plus_self_per_each_raw( + b: Linear<0, 200>, + ) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); burner: cross_sub; }; let item = create_max_item(&collection, &sender, burner.clone())?; - for i in 0..b { - create_max_item(&collection, &sender, T::CrossTokenAddressMapping::token_to_address(collection.id, item))?; + for _ in 0..b { + create_max_item( + &collection, + &sender, + T::CrossTokenAddressMapping::token_to_address(collection.id, item), + )?; + } + + #[block] + { + >::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?; } - }: {>::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?} - transfer_raw { - bench_init!{ + Ok(()) + } + + #[benchmark] + fn transfer_raw() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; receiver: cross_sub; }; let item = create_max_item(&collection, &owner, sender.clone())?; - }: {>::transfer(&collection, &sender, &receiver, item, &Unlimited)?} - approve { - bench_init!{ + #[block] + { + >::transfer(&collection, &sender, &receiver, item, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn approve() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let item = create_max_item(&collection, &owner, sender.clone())?; - }: {>::set_allowance(&collection, &sender, item, Some(&spender))?} - approve_from { - bench_init!{ + #[block] + { + >::set_allowance(&collection, &sender, item, Some(&spender))?; + } + + Ok(()) + } + + #[benchmark] + fn approve_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let owner_eth = T::CrossAccountId::from_eth(*sender.as_eth()); let item = create_max_item(&collection, &owner, owner_eth.clone())?; - }: {>::set_allowance_from(&collection, &sender, &owner_eth, item, Some(&spender))?} - check_allowed_raw { - bench_init!{ + #[block] + { + >::set_allowance_from( + &collection, + &sender, + &owner_eth, + item, + Some(&spender), + )?; + } + + Ok(()) + } + + #[benchmark] + fn check_allowed_raw() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); - owner: cross_from_sub; sender: cross_sub; spender: cross_sub; receiver: cross_sub; + owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let item = create_max_item(&collection, &owner, sender.clone())?; >::set_allowance(&collection, &sender, item, Some(&spender))?; - }: {>::check_allowed(&collection, &spender, &sender, item, &Unlimited)?} - burn_from { - bench_init!{ + #[block] + { + >::check_allowed(&collection, &spender, &sender, item, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn burn_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; burner: cross_sub; }; let item = create_max_item(&collection, &owner, sender.clone())?; >::set_allowance(&collection, &sender, item, Some(&burner))?; - }: {>::burn_from(&collection, &burner, &sender, item, &Unlimited)?} - set_token_property_permissions { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[block] + { + >::burn_from(&collection, &burner, &sender, item, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn set_token_property_permissions( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: false, - token_owner: false, - }, - }).collect::>(); - }: {>::set_token_property_permissions(&collection, &owner, perms)?} - - set_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: false, + token_owner: false, + }, + }) + .collect::>(); + + #[block] + { + >::set_token_property_permissions(&collection, &owner, perms)?; + } + + Ok(()) + } + + #[benchmark] + fn set_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }) + .collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - init_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[block] + { + >::set_token_properties( + &collection, + &owner, + item, + props.into_iter(), + &Unlimited, + )?; + } + + Ok(()) + } + + #[benchmark] + fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }) + .collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); - }: { - let mut property_writer = pallet_common::collection_info_loaded_property_writer( - &collection, - is_collection_admin, - property_permissions, - ); + let (is_collection_admin, property_permissions) = + load_is_admin_and_property_permissions(&collection, &owner); + todo!(); + #[block] + {} + // let mut property_writer = pallet_common::collection_info_loaded_property_writer( + // &collection, + // is_collection_admin, + // property_permissions, + // ); - property_writer.write_token_properties( - true, - item, - props.into_iter(), - crate::erc::ERC721TokenEvent::TokenChanged { - token_id: item.into(), - } - .to_log(T::ContractAddress::get()), - )? + // #[block] + // { + // property_writer.write_token_properties( + // true, + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )?; + // } + + Ok(()) } - delete_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[benchmark] + fn delete_token_properties( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: true, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: true, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }) + .collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - >::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?; + >::set_token_properties( + &collection, + &owner, + item, + props.into_iter(), + &Unlimited, + )?; let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); - }: {>::delete_token_properties(&collection, &owner, item, to_delete.into_iter(), &Unlimited)?} - token_owner { - bench_init!{ + #[block] + { + >::delete_token_properties( + &collection, + &owner, + item, + to_delete.into_iter(), + &Unlimited, + )?; + } + + Ok(()) + } + + #[benchmark] + fn token_owner() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; let item = create_max_item(&collection, &owner, owner.clone())?; - }: {collection.token_owner(item).unwrap()} - set_allowance_for_all { - bench_init!{ + #[block] + { + collection.token_owner(item).unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn set_allowance_for_all() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; operator: cross_sub; }; - }: {>::set_allowance_for_all(&collection, &owner, &operator, true)?} - allowance_for_all { - bench_init!{ + #[block] + { + >::set_allowance_for_all(&collection, &owner, &operator, true)?; + } + + Ok(()) + } + + #[benchmark] + fn allowance_for_all() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; operator: cross_sub; }; - }: {>::allowance_for_all(&collection, &owner, &operator)} - repair_item { - bench_init!{ + #[block] + { + >::allowance_for_all(&collection, &owner, &operator); + } + + Ok(()) + } + + #[benchmark] + fn repair_item() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; let item = create_max_item(&collection, &owner, owner.clone())?; - }: {>::repair_item(&collection, item)?} + + #[block] + { + >::repair_item(&collection, item)?; + } + + Ok(()) + } } diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 469b582742..06664fe2f3 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -16,11 +16,12 @@ use core::{convert::TryInto, iter::IntoIterator}; -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::v2::*; use pallet_common::{ bench_init, benchmarking::{ - create_collection_raw, load_is_admin_and_property_permissions, property_key, property_value, + create_collection_raw, /*load_is_admin_and_property_permissions,*/ property_key, + property_value, }, }; use sp_std::prelude::*; @@ -68,38 +69,70 @@ fn create_collection( ) } -benchmarks! { - create_item { - bench_init!{ +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create_item() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); to: cross_sub; }; - }: {create_max_item(&collection, &sender, [(to.clone(), 200)])?} - create_multiple_items { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + #[block] + { + create_max_item(&collection, &sender, [(to.clone(), 200)])?; + } + + Ok(()) + } + + #[benchmark] + fn create_multiple_items(b: Linear<0, MAX_ITEMS_PER_BATCH>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); to: cross_sub; }; - let data = (0..b).map(|_| create_max_item_data::([(to.clone(), 200)])).collect(); - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} + let data = (0..b) + .map(|_| create_max_item_data::([(to.clone(), 200)])) + .collect(); + + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } - create_multiple_items_ex_multiple_items { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + Ok(()) + } + #[benchmark] + fn create_multiple_items_ex_multiple_items( + b: Linear<0, MAX_ITEMS_PER_BATCH>, + ) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); }; - let data = (0..b).map(|t| { - bench_init!(to: cross_sub(t);); - create_max_item_data::([(to, 200)]) - }).collect(); - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} - - create_multiple_items_ex_multiple_owners { - let b in 0..MAX_ITEMS_PER_BATCH; - bench_init!{ + let data = (0..b) + .map(|t| { + bench_init!(to: cross_sub(t);); + create_max_item_data::([(to, 200)]) + }) + .collect(); + + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn create_multiple_items_ex_multiple_owners( + b: Linear<0, MAX_ITEMS_PER_BATCH>, + ) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); }; @@ -107,258 +140,531 @@ benchmarks! { bench_init!(to: cross_sub(u);); (to, 200) }))]; - }: {>::create_multiple_items(&collection, &sender, data, &Unlimited)?} + + #[block] + { + >::create_multiple_items(&collection, &sender, data, &Unlimited)?; + } + + Ok(()) + } // Other user left, token data is kept - burn_item_partial { - bench_init!{ + #[benchmark] + fn burn_item_partial() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); sender: cross_from_sub(owner); burner: cross_sub; another_owner: cross_sub; }; - let item = create_max_item(&collection, &sender, [(burner.clone(), 200), (another_owner, 200)])?; - }: {>::burn(&collection, &burner, item, 200)?} + let item = create_max_item( + &collection, + &sender, + [(burner.clone(), 200), (another_owner, 200)], + )?; + + #[block] + { + >::burn(&collection, &burner, item, 200)?; + } + + Ok(()) + } + // No users remaining, token is destroyed - burn_item_fully { - bench_init!{ + #[benchmark] + fn burn_item_fully() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); - sender: cross_from_sub(owner); burner: cross_sub; another_owner: cross_sub; + sender: cross_from_sub(owner); burner: cross_sub; }; let item = create_max_item(&collection, &sender, [(burner.clone(), 200)])?; - }: {>::burn(&collection, &burner, item, 200)?} - transfer_normal { - bench_init!{ + #[block] + { + >::burn(&collection, &burner, item, 200)?; + } + + Ok(()) + } + + #[benchmark] + fn transfer_normal() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); receiver: cross_sub; }; - let item = create_max_item(&collection, &sender, [(sender.clone(), 200), (receiver.clone(), 200)])?; - }: {>::transfer(&collection, &sender, &receiver, item, 100, &Unlimited)?} + let item = create_max_item( + &collection, + &sender, + [(sender.clone(), 200), (receiver.clone(), 200)], + )?; + + #[block] + { + >::transfer(&collection, &sender, &receiver, item, 100, &Unlimited)?; + } + + Ok(()) + } + // Target account is created - transfer_creating { - bench_init!{ + #[benchmark] + fn transfer_creating() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); receiver: cross_sub; }; let item = create_max_item(&collection, &sender, [(sender.clone(), 200)])?; - }: {>::transfer(&collection, &sender, &receiver, item, 100, &Unlimited)?} + + #[block] + { + >::transfer(&collection, &sender, &receiver, item, 100, &Unlimited)?; + } + + Ok(()) + } + // Source account is destroyed - transfer_removing { - bench_init!{ + #[benchmark] + fn transfer_removing() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); receiver: cross_sub; }; - let item = create_max_item(&collection, &sender, [(sender.clone(), 200), (receiver.clone(), 200)])?; - }: {>::transfer(&collection, &sender, &receiver, item, 200, &Unlimited)?} + let item = create_max_item( + &collection, + &sender, + [(sender.clone(), 200), (receiver.clone(), 200)], + )?; + + #[block] + { + >::transfer(&collection, &sender, &receiver, item, 200, &Unlimited)?; + } + + Ok(()) + } + // Source account destroyed, target created - transfer_creating_removing { - bench_init!{ + #[benchmark] + fn transfer_creating_removing() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); receiver: cross_sub; }; let item = create_max_item(&collection, &sender, [(sender.clone(), 200)])?; - }: {>::transfer(&collection, &sender, &receiver, item, 200, &Unlimited)?} - approve { - bench_init!{ + #[block] + { + >::transfer(&collection, &sender, &receiver, item, 200, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn approve() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let item = create_max_item(&collection, &owner, [(sender.clone(), 200)])?; - }: {>::set_allowance(&collection, &sender, &spender, item, 100)?} - approve_from { - bench_init!{ + #[block] + { + >::set_allowance(&collection, &sender, &spender, item, 100)?; + } + + Ok(()) + } + + #[benchmark] + fn approve_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; }; let owner_eth = T::CrossAccountId::from_eth(*sender.as_eth()); let item = create_max_item(&collection, &owner, [(owner_eth.clone(), 200)])?; - }: {>::set_allowance_from(&collection, &sender, &owner_eth, &spender, item, 100)?} - transfer_from_normal { - bench_init!{ + #[block] + { + >::set_allowance_from(&collection, &sender, &owner_eth, &spender, item, 100)?; + } + + Ok(()) + } + + #[benchmark] + fn transfer_from_normal() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; receiver: cross_sub; }; - let item = create_max_item(&collection, &owner, [(sender.clone(), 200), (receiver.clone(), 200)])?; + let item = create_max_item( + &collection, + &owner, + [(sender.clone(), 200), (receiver.clone(), 200)], + )?; >::set_allowance(&collection, &sender, &spender, item, 100)?; - }: {>::transfer_from(&collection, &spender, &sender, &receiver, item, 100, &Unlimited)?} + + #[block] + { + >::transfer_from( + &collection, + &spender, + &sender, + &receiver, + item, + 100, + &Unlimited, + )?; + } + + Ok(()) + } + // Target account is created - transfer_from_creating { - bench_init!{ + #[benchmark] + fn transfer_from_creating() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; receiver: cross_sub; }; let item = create_max_item(&collection, &owner, [(sender.clone(), 200)])?; >::set_allowance(&collection, &sender, &spender, item, 100)?; - }: {>::transfer_from(&collection, &spender, &sender, &receiver, item, 100, &Unlimited)?} + + #[block] + { + >::transfer_from( + &collection, + &spender, + &sender, + &receiver, + item, + 100, + &Unlimited, + )?; + } + + Ok(()) + } + // Source account is destroyed - transfer_from_removing { - bench_init!{ + #[benchmark] + fn transfer_from_removing() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; receiver: cross_sub; }; - let item = create_max_item(&collection, &owner, [(sender.clone(), 200), (receiver.clone(), 200)])?; + let item = create_max_item( + &collection, + &owner, + [(sender.clone(), 200), (receiver.clone(), 200)], + )?; >::set_allowance(&collection, &sender, &spender, item, 200)?; - }: {>::transfer_from(&collection, &spender, &sender, &receiver, item, 200, &Unlimited)?} + + #[block] + { + >::transfer_from( + &collection, + &spender, + &sender, + &receiver, + item, + 200, + &Unlimited, + )?; + } + + Ok(()) + } + // Source account destroyed, target created - transfer_from_creating_removing { - bench_init!{ + #[benchmark] + fn transfer_from_creating_removing() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; spender: cross_sub; receiver: cross_sub; }; let item = create_max_item(&collection, &owner, [(sender.clone(), 200)])?; >::set_allowance(&collection, &sender, &spender, item, 200)?; - }: {>::transfer_from(&collection, &spender, &sender, &receiver, item, 200, &Unlimited)?} + + #[block] + { + >::transfer_from( + &collection, + &spender, + &sender, + &receiver, + item, + 200, + &Unlimited, + )?; + } + + Ok(()) + } // Both source account and token is destroyed - burn_from { - bench_init!{ + #[benchmark] + fn burn_from() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; sender: cross_sub; burner: cross_sub; }; let item = create_max_item(&collection, &owner, [(sender.clone(), 200)])?; >::set_allowance(&collection, &sender, &burner, item, 200)?; - }: {>::burn_from(&collection, &burner, &sender, item, 200, &Unlimited)?} - set_token_property_permissions { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[block] + { + >::burn_from(&collection, &burner, &sender, item, 200, &Unlimited)?; + } + + Ok(()) + } + + #[benchmark] + fn set_token_property_permissions( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: false, - token_owner: false, - }, - }).collect::>(); - }: {>::set_token_property_permissions(&collection, &owner, perms)?} - - set_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: false, + token_owner: false, + }, + }) + .collect::>(); + + #[block] + { + >::set_token_property_permissions(&collection, &owner, perms)?; + } + + Ok(()) + } + + #[benchmark] + fn set_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }) + .collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - init_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[block] + { + >::set_token_properties( + &collection, + &owner, + item, + props.into_iter(), + &Unlimited, + )?; + } + + Ok(()) + } + + #[benchmark] + fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: false, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); - let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - - let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); - }: { - let mut property_writer = pallet_common::collection_info_loaded_property_writer( - &collection, - is_collection_admin, - property_permissions, - ); - - property_writer.write_token_properties( - true, - item, - props.into_iter(), - crate::erc::ERC721TokenEvent::TokenChanged { - token_id: item.into(), - } - .to_log(T::ContractAddress::get()), - )? + // let props = (0..b).map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // }).collect::>(); + // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + + // let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); + + #[block] + {} + todo!(); + // let mut property_writer = pallet_common::collection_info_loaded_property_writer( + // &collection, + // is_collection_admin, + // property_permissions, + // ); + + // #[block] + // { + // property_writer.write_token_properties( + // true, + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )?; + // } + + Ok(()) } - delete_token_properties { - let b in 0..MAX_PROPERTIES_PER_ITEM; - bench_init!{ + #[benchmark] + fn delete_token_properties( + b: Linear<0, MAX_PROPERTIES_PER_ITEM>, + ) -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b).map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: true, - collection_admin: true, - token_owner: true, - }, - }).collect::>(); + let perms = (0..b) + .map(|k| PropertyKeyPermission { + key: property_key(k as usize), + permission: PropertyPermission { + mutable: true, + collection_admin: true, + token_owner: true, + }, + }) + .collect::>(); >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b).map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }).collect::>(); + let props = (0..b) + .map(|k| Property { + key: property_key(k as usize), + value: property_value(), + }) + .collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - >::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?; + >::set_token_properties( + &collection, + &owner, + item, + props.into_iter(), + &Unlimited, + )?; let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); - }: {>::delete_token_properties(&collection, &owner, item, to_delete.into_iter(), &Unlimited)?} - repartition_item { - bench_init!{ + #[block] + { + >::delete_token_properties( + &collection, + &owner, + item, + to_delete.into_iter(), + &Unlimited, + )?; + } + + Ok(()) + } + + #[benchmark] + fn repartition_item() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); owner: cross_sub; }; let item = create_max_item(&collection, &sender, [(owner.clone(), 100)])?; - }: {>::repartition(&collection, &owner, item, 200)?} - token_owner { - bench_init!{ + #[block] + { + >::repartition(&collection, &owner, item, 200)?; + } + + Ok(()) + } + + #[benchmark] + fn token_owner() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); sender: cross_from_sub(owner); owner: cross_sub; }; let item = create_max_item(&collection, &sender, [(owner, 100)])?; - }: {>::token_owner(collection.id, item).unwrap()} - set_allowance_for_all { - bench_init!{ + #[block] + { + >::token_owner(collection.id, item).unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn set_allowance_for_all() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; operator: cross_sub; }; - }: {>::set_allowance_for_all(&collection, &owner, &operator, true)?} - allowance_for_all { - bench_init!{ + #[block] + { + >::set_allowance_for_all(&collection, &owner, &operator, true)?; + } + + Ok(()) + } + + #[benchmark] + fn allowance_for_all() -> Result<(), BenchmarkError> { + bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; operator: cross_sub; }; - }: {>::allowance_for_all(&collection, &owner, &operator)} - repair_item { - bench_init!{ + #[block] + { + >::allowance_for_all(&collection, &owner, &operator); + } + + Ok(()) + } + + #[benchmark] + fn repair_item() -> Result<(), BenchmarkError> { + bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; let item = create_max_item(&collection, &owner, [(owner.clone(), 100)])?; - }: {>::repair_item(&collection, item)?} + + #[block] + { + >::repair_item(&collection, item)?; + } + + Ok(()) + } } diff --git a/pallets/structure/src/benchmarking.rs b/pallets/structure/src/benchmarking.rs index dfc97269cf..364ceaa5e5 100644 --- a/pallets/structure/src/benchmarking.rs +++ b/pallets/structure/src/benchmarking.rs @@ -14,10 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Unique Network. If not, see . -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::v2::{account, benchmarks, BenchmarkError}; use frame_support::traits::{fungible::Balanced, tokens::Precision, Get}; use pallet_common::Config as CommonConfig; use pallet_evm::account::CrossAccountId; +use sp_std::vec; use up_data_structs::{ budget::Unlimited, CollectionMode, CreateCollectionData, CreateItemData, CreateNftData, }; @@ -26,12 +27,21 @@ use super::*; const SEED: u32 = 1; -benchmarks! { - find_parent { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn find_parent() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let caller_cross = T::CrossAccountId::from_sub(caller.clone()); - let _ = ::Currency::deposit(&caller, T::CollectionCreationPrice::get(), Precision::Exact).unwrap(); + let _ = ::Currency::deposit( + &caller, + T::CollectionCreationPrice::get(), + Precision::Exact, + ) + .unwrap(); T::CollectionDispatch::create( caller_cross.clone(), caller_cross.clone(), @@ -43,9 +53,19 @@ benchmarks! { let dispatch = T::CollectionDispatch::dispatch(CollectionId(1))?; let dispatch = dispatch.as_dyn(); - dispatch.create_item(caller_cross.clone(), caller_cross, CreateItemData::NFT(CreateNftData::default()), &Unlimited)?; - }: { - let parent = >::find_parent(CollectionId(1), TokenId(1))?; - assert!(matches!(parent, Parent::User(_))) + dispatch.create_item( + caller_cross.clone(), + caller_cross, + CreateItemData::NFT(CreateNftData::default()), + &Unlimited, + )?; + + #[block] + { + let parent = >::find_parent(CollectionId(1), TokenId(1))?; + assert!(matches!(parent, Parent::User(_))); + } + + Ok(()) } } diff --git a/pallets/unique/src/benchmarking.rs b/pallets/unique/src/benchmarking.rs index 265be355fe..fe633cd820 100644 --- a/pallets/unique/src/benchmarking.rs +++ b/pallets/unique/src/benchmarking.rs @@ -16,7 +16,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, benchmarks}; +use frame_benchmarking::v2::{account, benchmarks, BenchmarkError}; use frame_support::traits::{fungible::Balanced, tokens::Precision, Get}; use frame_system::RawOrigin; use pallet_common::{ @@ -63,81 +63,201 @@ pub fn create_nft_collection( create_collection_helper::(owner, CollectionMode::NFT) } -benchmarks! 
{ - create_collection { - let col_name = create_u16_data::<{MAX_COLLECTION_NAME_LENGTH}>(); - let col_desc = create_u16_data::<{MAX_COLLECTION_DESCRIPTION_LENGTH}>(); - let token_prefix = create_data::<{MAX_TOKEN_PREFIX_LENGTH}>(); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create_collection() -> Result<(), BenchmarkError> { + let col_name = create_u16_data::<{ MAX_COLLECTION_NAME_LENGTH }>(); + let col_desc = create_u16_data::<{ MAX_COLLECTION_DESCRIPTION_LENGTH }>(); + let token_prefix = create_data::<{ MAX_TOKEN_PREFIX_LENGTH }>(); let mode: CollectionMode = CollectionMode::NFT; let caller: T::AccountId = account("caller", 0, SEED); - let _ = ::Currency::deposit(&caller, T::CollectionCreationPrice::get(), Precision::Exact).unwrap(); - }: _(RawOrigin::Signed(caller.clone()), col_name, col_desc, token_prefix, mode) - verify { - assert_eq!(>::get(CollectionId(1)).unwrap().owner, caller); + let _ = ::Currency::deposit( + &caller, + T::CollectionCreationPrice::get(), + Precision::Exact, + ) + .unwrap(); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + col_name, + col_desc, + token_prefix, + mode, + ); + + assert_eq!( + >::get(CollectionId(1)) + .unwrap() + .owner, + caller + ); + + Ok(()) } - destroy_collection { + #[benchmark] + fn destroy_collection() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - }: _(RawOrigin::Signed(caller.clone()), collection) - add_to_allow_list { + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), collection); + + Ok(()) + } + + #[benchmark] + fn add_to_allow_list() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let allowlist_account: T::AccountId = account("admin", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - }: _(RawOrigin::Signed(caller.clone()), collection, T::CrossAccountId::from_sub(allowlist_account)) - remove_from_allow_list { + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + collection, + T::CrossAccountId::from_sub(allowlist_account), + ); + + Ok(()) + } + + #[benchmark] + fn remove_from_allow_list() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let allowlist_account: T::AccountId = account("admin", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - >::add_to_allow_list(RawOrigin::Signed(caller.clone()).into(), collection, T::CrossAccountId::from_sub(allowlist_account.clone()))?; - }: _(RawOrigin::Signed(caller.clone()), collection, T::CrossAccountId::from_sub(allowlist_account)) + >::add_to_allow_list( + RawOrigin::Signed(caller.clone()).into(), + collection, + T::CrossAccountId::from_sub(allowlist_account.clone()), + )?; - change_collection_owner { + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + collection, + T::CrossAccountId::from_sub(allowlist_account), + ); + + Ok(()) + } + + #[benchmark] + fn change_collection_owner() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; let new_owner: T::AccountId = account("admin", 0, SEED); - }: _(RawOrigin::Signed(caller.clone()), collection, new_owner) - add_collection_admin { + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), collection, new_owner); + + Ok(()) + } + + #[benchmark] + fn add_collection_admin() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection 
= create_nft_collection::(caller.clone())?; let new_admin: T::AccountId = account("admin", 0, SEED); - }: _(RawOrigin::Signed(caller.clone()), collection, T::CrossAccountId::from_sub(new_admin)) - remove_collection_admin { + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + collection, + T::CrossAccountId::from_sub(new_admin), + ); + + Ok(()) + } + + #[benchmark] + fn remove_collection_admin() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; let new_admin: T::AccountId = account("admin", 0, SEED); - >::add_collection_admin(RawOrigin::Signed(caller.clone()).into(), collection, T::CrossAccountId::from_sub(new_admin.clone()))?; - }: _(RawOrigin::Signed(caller.clone()), collection, T::CrossAccountId::from_sub(new_admin)) + >::add_collection_admin( + RawOrigin::Signed(caller.clone()).into(), + collection, + T::CrossAccountId::from_sub(new_admin.clone()), + )?; + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + collection, + T::CrossAccountId::from_sub(new_admin), + ); - set_collection_sponsor { + Ok(()) + } + + #[benchmark] + fn set_collection_sponsor() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - }: _(RawOrigin::Signed(caller.clone()), collection, caller.clone()) - confirm_sponsorship { + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + collection, + caller.clone(), + ); + + Ok(()) + } + + #[benchmark] + fn confirm_sponsorship() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - >::set_collection_sponsor(RawOrigin::Signed(caller.clone()).into(), collection, caller.clone())?; - }: _(RawOrigin::Signed(caller.clone()), collection) + >::set_collection_sponsor( + RawOrigin::Signed(caller.clone()).into(), + collection, + caller.clone(), + )?; - remove_collection_sponsor { + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), collection); + + Ok(()) + } + + #[benchmark] + fn remove_collection_sponsor() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - >::set_collection_sponsor(RawOrigin::Signed(caller.clone()).into(), collection, caller.clone())?; + >::set_collection_sponsor( + RawOrigin::Signed(caller.clone()).into(), + collection, + caller.clone(), + )?; >::confirm_sponsorship(RawOrigin::Signed(caller.clone()).into(), collection)?; - }: _(RawOrigin::Signed(caller.clone()), collection) - set_transfers_enabled_flag { + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), collection); + + Ok(()) + } + + #[benchmark] + fn set_transfers_enabled_flag() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; - }: _(RawOrigin::Signed(caller.clone()), collection, false) + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), collection, false); + + Ok(()) + } - set_collection_limits { + #[benchmark] + fn set_collection_limits() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller.clone())?; @@ -152,10 +272,21 @@ benchmarks! 
{ sponsored_data_rate_limit: None, transfers_enabled: Some(true), }; - }: set_collection_limits(RawOrigin::Signed(caller.clone()), collection, cl) - force_repair_collection { + #[extrinsic_call] + set_collection_limits(RawOrigin::Signed(caller.clone()), collection, cl); + + Ok(()) + } + + #[benchmark] + fn force_repair_collection() -> Result<(), BenchmarkError> { let caller: T::AccountId = account("caller", 0, SEED); let collection = create_nft_collection::(caller)?; - }: _(RawOrigin::Root, collection) + + #[extrinsic_call] + _(RawOrigin::Root, collection); + + Ok(()) + } } From 885f1d23d20d8ea8cb7b13e52f3c5c70fa8c5305 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Tue, 10 Oct 2023 10:52:30 +0000 Subject: [PATCH 113/143] fix: compilation errors --- node/cli/src/command.rs | 3 +++ pallets/inflation/src/benchmarking.rs | 4 +-- pallets/nonfungible/src/benchmarking.rs | 36 +++++++++++-------------- pallets/unique/src/benchmarking.rs | 1 + 4 files changed, 21 insertions(+), 23 deletions(-) diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index c117ad897d..04f873fb34 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -354,6 +354,7 @@ pub fn run() -> Result<()> { } #[cfg(feature = "runtime-benchmarks")] Some(Subcommand::Benchmark(cmd)) => { + use polkadot_cli::Block; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; let runner = cli.create_runner(cmd)?; // Switch on the concrete benchmark sub-command- @@ -363,6 +364,7 @@ pub fn run() -> Result<()> { } BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { let partials = new_partial::< + _, default_runtime::RuntimeApi, DefaultRuntimeExecutor, _, @@ -371,6 +373,7 @@ pub fn run() -> Result<()> { }), BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial::< + _, default_runtime::RuntimeApi, DefaultRuntimeExecutor, _, diff --git a/pallets/inflation/src/benchmarking.rs b/pallets/inflation/src/benchmarking.rs index 4632c8e2a5..ac0938beb9 100644 --- a/pallets/inflation/src/benchmarking.rs +++ b/pallets/inflation/src/benchmarking.rs @@ -31,11 +31,11 @@ mod benchmarks { fn on_initialize() -> Result<(), BenchmarkError> { let block1: BlockNumberFor = 1u32.into(); let block2: BlockNumberFor = 2u32.into(); - as Hooks>::on_initialize(block1); // Create Treasury account + as Hooks<_>>::on_initialize(block1); // Create Treasury account #[block] { - as Hooks>::on_initialize(block2); + as Hooks<_>>::on_initialize(block2); // Benchmark deposit_into_existing path } diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index f3c2731163..90f7499be2 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -360,29 +360,23 @@ mod benchmarks { .collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; - let (is_collection_admin, property_permissions) = - load_is_admin_and_property_permissions(&collection, &owner); + // let (is_collection_admin, property_permissions) = + // load_is_admin_and_property_permissions(&collection, &owner); todo!(); #[block] - {} - // let mut property_writer = pallet_common::collection_info_loaded_property_writer( - // &collection, - // is_collection_admin, - // property_permissions, - // ); - - // #[block] - // { - // property_writer.write_token_properties( - // true, - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )?; - // } + { + // 
let mut property_writer = + // pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + // property_writer.write_token_properties( + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )?; + } Ok(()) } diff --git a/pallets/unique/src/benchmarking.rs b/pallets/unique/src/benchmarking.rs index fe633cd820..c3a1be9906 100644 --- a/pallets/unique/src/benchmarking.rs +++ b/pallets/unique/src/benchmarking.rs @@ -24,6 +24,7 @@ use pallet_common::{ erc::CrossAccountId, Config as CommonConfig, }; +use sp_std::vec; use sp_runtime::DispatchError; use up_data_structs::{ CollectionId, CollectionLimits, CollectionMode, MAX_COLLECTION_DESCRIPTION_LENGTH, From af2b5f4b26cc500a97f28448af3abdfbd3d6ba83 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Wed, 11 Oct 2023 11:37:53 +0200 Subject: [PATCH 114/143] fix: build node with benchmarks --- Cargo.lock | 1 + Cargo.toml | 1 + node/cli/src/command.rs | 28 +++++++++++++++++---------- pallets/collator-selection/Cargo.toml | 2 +- pallets/unique/src/benchmarking.rs | 2 +- runtime/common/runtime_apis.rs | 3 ++- runtime/opal/Cargo.toml | 2 ++ 7 files changed, 26 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1922dbcaa..4767157284 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6536,6 +6536,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", "staging-xcm", diff --git a/Cargo.toml b/Cargo.toml index 0affad4865..14a4294e82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,6 +170,7 @@ sp-session = { default-features = false, git = "https://github.com/paritytech/po sp-staking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } sp-state-machine = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-storage = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } sp-transaction-pool = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 04f873fb34..5545347023 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -354,30 +354,38 @@ pub fn run() -> Result<()> { } #[cfg(feature = "runtime-benchmarks")] Some(Subcommand::Benchmark(cmd)) => { - use polkadot_cli::Block; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; + use polkadot_cli::Block; + use sp_io::SubstrateHostFunctions; + let runner = cli.create_runner(cmd)?; // Switch on the concrete benchmark sub-command- match cmd { BenchmarkCmd::Pallet(cmd) => { - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { let partials = new_partial::< + opal_runtime::Runtime, + opal_runtime::RuntimeApi, + OpalRuntimeExecutor, _, - default_runtime::RuntimeApi, - DefaultRuntimeExecutor, - _, - >(&config, crate::service::parachain_build_import_queue)?; + 
>( + &config, + crate::service::parachain_build_import_queue::, + )?; cmd.run(partials.client) }), BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial::< + opal_runtime::Runtime, + opal_runtime::RuntimeApi, + OpalRuntimeExecutor, _, - default_runtime::RuntimeApi, - DefaultRuntimeExecutor, - _, - >(&config, crate::service::parachain_build_import_queue)?; + >( + &config, + crate::service::parachain_build_import_queue::, + )?; let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); diff --git a/pallets/collator-selection/Cargo.toml b/pallets/collator-selection/Cargo.toml index b3c33f5ba8..ceb6044e13 100644 --- a/pallets/collator-selection/Cargo.toml +++ b/pallets/collator-selection/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } parity-scale-codec = { workspace = true } -rand = { version = "0.8.5", default-features = false } +rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } scale-info = { workspace = true } serde = { workspace = true } diff --git a/pallets/unique/src/benchmarking.rs b/pallets/unique/src/benchmarking.rs index c3a1be9906..9ddd258d7c 100644 --- a/pallets/unique/src/benchmarking.rs +++ b/pallets/unique/src/benchmarking.rs @@ -24,8 +24,8 @@ use pallet_common::{ erc::CrossAccountId, Config as CommonConfig, }; -use sp_std::vec; use sp_runtime::DispatchError; +use sp_std::vec; use up_data_structs::{ CollectionId, CollectionLimits, CollectionMode, MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_COLLECTION_NAME_LENGTH, MAX_TOKEN_PREFIX_LENGTH, diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 14d8c0b50d..228df8cd3f 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -569,7 +569,8 @@ macro_rules! 
impl_common_runtime_apis { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; + use sp_storage::TrackedStorageKey; let allowlist: Vec = vec![ // Total Issuance diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 028d89a76d..5e9e63723b 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -136,6 +136,7 @@ std = [ 'sp-runtime/std', 'sp-session/std', 'sp-std/std', + 'sp-storage/std', 'sp-transaction-pool/std', 'sp-version/std', 'staging-xcm-builder/std', @@ -279,6 +280,7 @@ sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } sp-std = { workspace = true } +sp-storage = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } staging-xcm = { workspace = true } From 5f71c376196c4428a2019ba597e85c34b8f0f4dd Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Thu, 12 Oct 2023 07:04:26 +0000 Subject: [PATCH 115/143] fix: benchmarks --- node/cli/src/command.rs | 4 - node/cli/src/service.rs | 17 ---- pallets/app-promotion/src/benchmarking.rs | 2 +- .../collator-selection/src/benchmarking.rs | 23 ++---- pallets/nonfungible/src/benchmarking.rs | 82 +++++++++---------- pallets/refungible/src/benchmarking.rs | 40 ++++----- 6 files changed, 71 insertions(+), 97 deletions(-) diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 5545347023..8db885351d 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -42,10 +42,6 @@ use sc_service::config::{BasePath, PrometheusConfig}; use sp_runtime::traits::AccountIdConversion; use up_common::types::opaque::RuntimeId; -#[cfg(feature = "runtime-benchmarks")] -use crate::chain_spec::default_runtime; -#[cfg(feature = "runtime-benchmarks")] -use crate::service::DefaultRuntimeExecutor; #[cfg(feature = "quartz-runtime")] use crate::service::QuartzRuntimeExecutor; #[cfg(feature = "unique-runtime")] diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index aa73cc7ec4..aa9cdc4e83 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -93,23 +93,6 @@ pub struct QuartzRuntimeExecutor; /// Opal native executor instance. pub struct OpalRuntimeExecutor; -#[cfg(all(feature = "unique-runtime", feature = "runtime-benchmarks"))] -pub type DefaultRuntimeExecutor = UniqueRuntimeExecutor; - -#[cfg(all( - not(feature = "unique-runtime"), - feature = "quartz-runtime", - feature = "runtime-benchmarks" -))] -pub type DefaultRuntimeExecutor = QuartzRuntimeExecutor; - -#[cfg(all( - not(feature = "unique-runtime"), - not(feature = "quartz-runtime"), - feature = "runtime-benchmarks" -))] -pub type DefaultRuntimeExecutor = OpalRuntimeExecutor; - #[cfg(feature = "unique-runtime")] impl NativeExecutionDispatch for UniqueRuntimeExecutor { /// Only enable the benchmarking host functions when we actually want to benchmark. 
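Note: the benchmark rewrites in these patches follow the frame-benchmarking v2 attribute syntax. A minimal sketch of that pattern, using a hypothetical pallet extrinsic `do_something` (none of these names come from this repository), looks like:

    use frame_benchmarking::v2::*;
    use frame_system::RawOrigin;

    #[benchmarks]
    mod benchmarks {
        use super::*;

        #[benchmark]
        fn do_something(n: Linear<1, 100>) -> Result<(), BenchmarkError> {
            // Setup code runs outside the measured section.
            let caller: T::AccountId = whitelisted_caller();

            // Measures the extrinsic named after this function;
            // `_` expands to `do_something`.
            #[extrinsic_call]
            _(RawOrigin::Signed(caller), n);

            Ok(())
        }
    }

Non-extrinsic code paths are measured with `#[block] { ... }` in place of `#[extrinsic_call]`, as in the `on_initialize` and `init_token_properties` benchmarks touched by these patches.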
diff --git a/pallets/app-promotion/src/benchmarking.rs b/pallets/app-promotion/src/benchmarking.rs index 43425a2ae3..6ccedc20a3 100644 --- a/pallets/app-promotion/src/benchmarking.rs +++ b/pallets/app-promotion/src/benchmarking.rs @@ -109,7 +109,7 @@ mod benchmarks { } #[benchmark] - fn payout_stakers(b: Linear<0, 100>) -> Result<(), BenchmarkError> { + fn payout_stakers(b: Linear<1, 100>) -> Result<(), BenchmarkError> { let pallet_admin = account::("admin", 1, SEED); PromototionPallet::::set_admin_address( RawOrigin::Root.into(), diff --git a/pallets/collator-selection/src/benchmarking.rs b/pallets/collator-selection/src/benchmarking.rs index 1d2edb8d37..40282faf9b 100644 --- a/pallets/collator-selection/src/benchmarking.rs +++ b/pallets/collator-selection/src/benchmarking.rs @@ -171,7 +171,8 @@ mod benchmarks { // Both invulnerables and candidates count together against MaxCollators. // Maybe try putting it in braces? 1 .. (T::MaxCollators::get() - 2) #[benchmark] - fn add_invulnerable(b: Linear<1, MAX_COLLATORS>) -> Result<(), BenchmarkError> { + fn add_invulnerable(b: Linear<2, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + let b = b - 1; register_validators::(b); register_invulnerables::(b); @@ -268,7 +269,8 @@ mod benchmarks { // worst case is when we have all the max-candidate slots filled except one, and we fill that // one. #[benchmark] - fn onboard(c: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + fn onboard(c: Linear<2, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { + let c = c - 1; register_validators::(c); register_candidates::(c); @@ -293,9 +295,7 @@ mod benchmarks { // worst case is the last candidate leaving. #[benchmark] - fn offboard(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { - let c = c + 1; - + fn offboard(c: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { register_validators::(c); register_candidates::(c); @@ -317,8 +317,7 @@ mod benchmarks { // worst case is the last candidate leaving. #[benchmark] - fn release_license(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { - let c = c + 1; + fn release_license(c: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { let bond = balance_unit::(); register_validators::(c); @@ -343,8 +342,7 @@ mod benchmarks { // worst case is the last candidate leaving. #[benchmark] - fn force_release_license(c: Linear<0, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { - let c = c + 1; + fn force_release_license(c: Linear<1, MAX_INVULNERABLES>) -> Result<(), BenchmarkError> { let bond = balance_unit::(); register_validators::(c); @@ -400,12 +398,9 @@ mod benchmarks { // worst case for new session. 
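// Note: `Linear<A, B>` is the frame-benchmarking v2 component range that the benchmark CLI
// sweeps from A to B, so a bound such as `c: Linear<1, MAX_INVULNERABLES>` folds the old
// `Linear<0, ..>` plus manual `let c = c + 1;` offset into an equivalent one-based range.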
#[benchmark] fn new_session( - r: Linear<0, MAX_INVULNERABLES>, - c: Linear<0, MAX_INVULNERABLES>, + r: Linear<1, MAX_INVULNERABLES>, + c: Linear<1, MAX_INVULNERABLES>, ) -> Result<(), BenchmarkError> { - let r = r + 1; - let c = c + 1; - frame_system::Pallet::::set_block_number(0u32.into()); register_validators::(c); diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 90f7499be2..5e87041ea7 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -17,9 +17,7 @@ use frame_benchmarking::v2::{account, benchmarks, BenchmarkError}; use pallet_common::{ bench_init, - benchmarking::{ - create_collection_raw, load_is_admin_and_property_permissions, property_key, property_value, - }, + benchmarking::{create_collection_raw, property_key, property_value}, CommonCollectionOperations, }; use sp_std::prelude::*; @@ -334,49 +332,51 @@ mod benchmarks { Ok(()) } + // TODO: #[benchmark] fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { - bench_init! { - owner: sub; collection: collection(owner); - owner: cross_from_sub; - }; - - let perms = (0..b) - .map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }) - .collect::>(); - >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b) - .map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }) - .collect::>(); - let item = create_max_item(&collection, &owner, owner.clone())?; + // bench_init! { + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let perms = (0..b) + // .map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }) + // .collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; + #[block] + {} + // let props = (0..b) + // .map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // }) + // .collect::>(); + // let item = create_max_item(&collection, &owner, owner.clone())?; // let (is_collection_admin, property_permissions) = // load_is_admin_and_property_permissions(&collection, &owner); - todo!(); - #[block] - { - // let mut property_writer = - // pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); - - // property_writer.write_token_properties( - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )?; - } + // #[block] + // { + // let mut property_writer = + // pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + // property_writer.write_token_properties( + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )?; + // } Ok(()) } diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 06664fe2f3..f93df1a10e 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -490,35 +490,35 @@ mod benchmarks { Ok(()) } + // TODO: #[benchmark] fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { - bench_init! 
{ - owner: sub; collection: collection(owner); - owner: cross_from_sub; - }; + // bench_init! { + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let perms = (0..b) + // .map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }) + // .collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; - let perms = (0..b) - .map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: true, - token_owner: true, - }, - }) - .collect::>(); - >::set_token_property_permissions(&collection, &owner, perms)?; + #[block] + {} // let props = (0..b).map(|k| Property { // key: property_key(k as usize), // value: property_value(), // }).collect::>(); // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - // let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner); - - #[block] - {} - todo!(); + // let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner) // let mut property_writer = pallet_common::collection_info_loaded_property_writer( // &collection, // is_collection_admin, From 3db37f4ef63b9cf365dcc41de291babd55bf0e35 Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Thu, 12 Oct 2023 08:15:35 +0000 Subject: [PATCH 116/143] fix: clippy warnings --- Cargo.lock | 2 + node/cli/src/chain_spec.rs | 2 +- node/cli/src/command.rs | 1 + node/cli/src/rpc.rs | 4 +- node/cli/src/service.rs | 66 ++++++++++++++-------- pallets/app-promotion/src/benchmarking.rs | 16 +++--- pallets/nonfungible/src/benchmarking.rs | 2 +- pallets/refungible/src/benchmarking.rs | 2 +- pallets/unique/src/benchmarking.rs | 26 ++++----- runtime/common/config/xcm/foreignassets.rs | 7 +-- runtime/quartz/Cargo.toml | 1 + runtime/unique/Cargo.toml | 1 + 12 files changed, 75 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4767157284..a8c1591024 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10145,6 +10145,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", "staging-xcm", @@ -14897,6 +14898,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-std", + "sp-storage", "sp-transaction-pool", "sp-version", "staging-xcm", diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index 3efd24a42a..6e5e9bf3f7 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -238,7 +238,7 @@ macro_rules! 
testnet_genesis { vesting: VestingConfig { vesting: vec![] }, parachain_info: ParachainInfoConfig { parachain_id: $id.into(), - Default::default() + ..Default::default() }, aura: AuraConfig { authorities: $initial_invulnerables diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 8db885351d..945014c606 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -399,6 +399,7 @@ pub fn run() -> Result<()> { Some(Subcommand::TryRuntime(cmd)) => { use std::{future::Future, pin::Pin}; + use polkadot_cli::Block; use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; use try_runtime_cli::block_building_info::timestamp_with_aura_info; diff --git a/node/cli/src/rpc.rs b/node/cli/src/rpc.rs index 8c33ade0b5..1fac1ea574 100644 --- a/node/cli/src/rpc.rs +++ b/node/cli/src/rpc.rs @@ -67,7 +67,7 @@ pub struct FullDeps { } /// Instantiate all Full RPC extensions. -pub fn create_full( +pub fn create_full( io: &mut RpcModule<()>, deps: FullDeps, ) -> Result<(), Box> @@ -244,7 +244,7 @@ where EthFilter::new( client.clone(), eth_backend, - graph.clone(), + graph, filter_pool, 500_usize, // max stored filters max_past_logs, diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index aa9cdc4e83..98a5d96b35 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -498,7 +498,7 @@ where select_chain, }; - create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_handle, full_deps)?; + create_full::<_, _, _, Runtime, _>(&mut rpc_handle, full_deps)?; let eth_deps = EthDeps { client, @@ -547,7 +547,7 @@ where config: parachain_config, keystore: params.keystore_container.keystore(), backend: backend.clone(), - network: network.clone(), + network, sync_service: sync_service.clone(), system_rpc_tx, telemetry: telemetry.as_mut(), @@ -600,19 +600,21 @@ where if validator { start_consensus( client.clone(), - backend.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - overseer_handle, - relay_chain_slot_duration, - para_id, - collator_key.expect("cli args do not allow this"), - announce_block, + StartConsensusParameters { + backend: backend.clone(), + prometheus_registry: prometheus_registry.as_ref(), + telemetry: telemetry.as_ref().map(|t| t.handle()), + task_manager: &task_manager, + relay_chain_interface: relay_chain_interface.clone(), + sync_oracle: sync_service, + keystore: params.keystore_container.keystore(), + overseer_handle, + relay_chain_slot_duration, + para_id, + collator_key: collator_key.expect("cli args do not allow this"), + announce_block, + } )?; } @@ -670,16 +672,12 @@ where .map_err(Into::into) } -pub fn start_consensus( - client: Arc>, +pub struct StartConsensusParameters<'a> { backend: Arc, - prometheus_registry: Option<&Registry>, + prometheus_registry: Option<&'a Registry>, telemetry: Option, - task_manager: &TaskManager, + task_manager: &'a TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc< - sc_transaction_pool::FullPool>, - >, sync_oracle: Arc>, keystore: KeystorePtr, overseer_handle: OverseerHandle, @@ -687,6 +685,14 @@ pub fn start_consensus( para_id: ParaId, collator_key: CollatorPair, announce_block: Arc>) + Send + Sync>, +} + +pub fn start_consensus( + client: Arc>, + transaction_pool: Arc< + sc_transaction_pool::FullPool>, + >, + parameters: StartConsensusParameters<'_>, ) -> Result<(), sc_service::Error> where ExecutorDispatch: 
NativeExecutionDispatch + 'static, @@ -697,6 +703,20 @@ where RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, Runtime: RuntimeInstance, { + let StartConsensusParameters { + backend, + prometheus_registry, + telemetry, + task_manager, + relay_chain_interface, + sync_oracle, + keystore, + overseer_handle, + relay_chain_slot_duration, + para_id, + collator_key, + announce_block, + } = parameters; let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -704,7 +724,7 @@ where client.clone(), transaction_pool, prometheus_registry, - telemetry.clone(), + telemetry, ); let proposer = Proposer::new(proposer_factory); @@ -1043,7 +1063,7 @@ where select_chain, }; - create_full::<_, _, _, Runtime, RuntimeApi, _>(&mut rpc_module, full_deps)?; + create_full::<_, _, _, Runtime, _>(&mut rpc_module, full_deps)?; let eth_deps = EthDeps { client, diff --git a/pallets/app-promotion/src/benchmarking.rs b/pallets/app-promotion/src/benchmarking.rs index 6ccedc20a3..31588f33a7 100644 --- a/pallets/app-promotion/src/benchmarking.rs +++ b/pallets/app-promotion/src/benchmarking.rs @@ -161,7 +161,7 @@ mod benchmarks { T::RelayBlockNumberProvider::set_block_number(30_000.into()); #[extrinsic_call] - _(RawOrigin::Signed(pallet_admin.clone()), Some(b as u8)); + _(RawOrigin::Signed(pallet_admin), Some(b as u8)); Ok(()) } @@ -178,7 +178,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), share * ::Currency::total_balance(&caller), ); @@ -211,7 +211,7 @@ mod benchmarks { .collect::, _>>()?; #[extrinsic_call] - _(RawOrigin::Signed(caller.clone())); + _(RawOrigin::Signed(caller)); Ok(()) } @@ -242,7 +242,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), Into::>::into(1000u128) * T::Nominal::get(), ); @@ -268,7 +268,7 @@ mod benchmarks { let collection = create_nft_collection::(caller)?; #[extrinsic_call] - _(RawOrigin::Signed(pallet_admin.clone()), collection); + _(RawOrigin::Signed(pallet_admin), collection); Ok(()) } @@ -296,7 +296,7 @@ mod benchmarks { )?; #[extrinsic_call] - _(RawOrigin::Signed(pallet_admin.clone()), collection); + _(RawOrigin::Signed(pallet_admin), collection); Ok(()) } @@ -319,7 +319,7 @@ mod benchmarks { >::finish(RawOrigin::Root.into(), address, data)?; #[extrinsic_call] - _(RawOrigin::Signed(pallet_admin.clone()), address); + _(RawOrigin::Signed(pallet_admin), address); Ok(()) } @@ -346,7 +346,7 @@ mod benchmarks { )?; #[extrinsic_call] - _(RawOrigin::Signed(pallet_admin.clone()), address); + _(RawOrigin::Signed(pallet_admin), address); Ok(()) } diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index 5e87041ea7..fa145f53c7 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -75,7 +75,7 @@ mod benchmarks { #[block] { - create_max_item(&collection, &sender, to.clone())?; + create_max_item(&collection, &sender, to)?; } Ok(()) diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index f93df1a10e..9d3657b16e 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -82,7 +82,7 @@ mod benchmarks { #[block] { - create_max_item(&collection, &sender, [(to.clone(), 200)])?; + create_max_item(&collection, &sender, [(to, 200)])?; } Ok(()) diff --git a/pallets/unique/src/benchmarking.rs 
b/pallets/unique/src/benchmarking.rs index 9ddd258d7c..463291faf7 100644 --- a/pallets/unique/src/benchmarking.rs +++ b/pallets/unique/src/benchmarking.rs @@ -107,7 +107,7 @@ mod benchmarks { let collection = create_nft_collection::(caller.clone())?; #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), collection); + _(RawOrigin::Signed(caller), collection); Ok(()) } @@ -120,7 +120,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), collection, T::CrossAccountId::from_sub(allowlist_account), ); @@ -141,7 +141,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), collection, T::CrossAccountId::from_sub(allowlist_account), ); @@ -156,7 +156,7 @@ mod benchmarks { let new_owner: T::AccountId = account("admin", 0, SEED); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), collection, new_owner); + _(RawOrigin::Signed(caller), collection, new_owner); Ok(()) } @@ -169,7 +169,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), collection, T::CrossAccountId::from_sub(new_admin), ); @@ -190,7 +190,7 @@ mod benchmarks { #[extrinsic_call] _( - RawOrigin::Signed(caller.clone()), + RawOrigin::Signed(caller), collection, T::CrossAccountId::from_sub(new_admin), ); @@ -204,11 +204,7 @@ mod benchmarks { let collection = create_nft_collection::(caller.clone())?; #[extrinsic_call] - _( - RawOrigin::Signed(caller.clone()), - collection, - caller.clone(), - ); + _(RawOrigin::Signed(caller), collection, caller.clone()); Ok(()) } @@ -224,7 +220,7 @@ mod benchmarks { )?; #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), collection); + _(RawOrigin::Signed(caller), collection); Ok(()) } @@ -241,7 +237,7 @@ mod benchmarks { >::confirm_sponsorship(RawOrigin::Signed(caller.clone()).into(), collection)?; #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), collection); + _(RawOrigin::Signed(caller), collection); Ok(()) } @@ -252,7 +248,7 @@ mod benchmarks { let collection = create_nft_collection::(caller.clone())?; #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), collection, false); + _(RawOrigin::Signed(caller), collection, false); Ok(()) } @@ -275,7 +271,7 @@ mod benchmarks { }; #[extrinsic_call] - set_collection_limits(RawOrigin::Signed(caller.clone()), collection, cl); + set_collection_limits(RawOrigin::Signed(caller), collection, cl); Ok(()) } diff --git a/runtime/common/config/xcm/foreignassets.rs b/runtime/common/config/xcm/foreignassets.rs index e33b583284..154976836b 100644 --- a/runtime/common/config/xcm/foreignassets.rs +++ b/runtime/common/config/xcm/foreignassets.rs @@ -77,19 +77,18 @@ impl> MaybeEquivalence>::try_as_foreign(asset_id.clone())?; + let fid = >::try_as_foreign(*asset_id)?; XcmForeignAssetIdMapping::::get_multi_location(fid) } } diff --git a/runtime/quartz/Cargo.toml b/runtime/quartz/Cargo.toml index 34e342ec8a..60b55375d0 100644 --- a/runtime/quartz/Cargo.toml +++ b/runtime/quartz/Cargo.toml @@ -271,6 +271,7 @@ sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } sp-std = { workspace = true } +sp-storage = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } staging-xcm = { workspace = true } diff --git a/runtime/unique/Cargo.toml b/runtime/unique/Cargo.toml index 7314c8b6ae..a7e7c5a664 100644 --- a/runtime/unique/Cargo.toml +++ b/runtime/unique/Cargo.toml @@ -274,6 +274,7 @@ sp-offchain = { workspace = 
true } sp-runtime = { workspace = true } sp-session = { workspace = true } sp-std = { workspace = true } +sp-storage = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } staging-xcm = { workspace = true } From 18a5c326bd369eb58a17253a632c9e70b206b239 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Thu, 12 Oct 2023 14:28:24 +0200 Subject: [PATCH 117/143] ci: fix unnecessary node rebuilds during benchmarking --- Makefile | 91 ++++++++++++++++++-------------------------------------- 1 file changed, 29 insertions(+), 62 deletions(-) diff --git a/Makefile b/Makefile index 98664e2762..dbd6b0fa01 100644 --- a/Makefile +++ b/Makefile @@ -88,69 +88,36 @@ CollectionHelpers: CollectionHelpers.sol evm_stubs: UniqueFungible UniqueNFT UniqueRefungible UniqueRefungibleToken ContractHelpers CollectionHelpers -.PHONY: _bench -_bench: - cargo run --profile production --features runtime-benchmarks,$(RUNTIME) -- \ - benchmark pallet --pallet pallet-$(if $(PALLET),$(PALLET),$(error Must set PALLET)) \ +# TODO: Create benchmarking profile, make it a proper dependency +.PHONY: benchmarking-node +benchmarking-node: + cargo build --profile production --features runtime-benchmarks + +define _bench = +.PHONY: bench-$(1) +bench-$(1): benchmarking-node + ./target/production/unique-collator \ + benchmark pallet --pallet pallet-$(1) \ --wasm-execution compiled --extrinsic '*' \ - $(if $(TEMPLATE),$(TEMPLATE),--template=.maintain/frame-weight-template.hbs) --steps=50 --repeat=80 --heap-pages=4096 \ - --output=$(if $(OUTPUT),$(OUTPUT),./pallets/$(if $(PALLET_DIR),$(PALLET_DIR),$(PALLET))/src/weights.rs) - -.PHONY: bench-evm-migration -bench-evm-migration: - make _bench PALLET=evm-migration - -.PHONY: bench-configuration -bench-configuration: - make _bench PALLET=configuration - -.PHONY: bench-common -bench-common: - make _bench PALLET=common - -.PHONY: bench-unique -bench-unique: - make _bench PALLET=unique - -.PHONY: bench-fungible -bench-fungible: - make _bench PALLET=fungible - -.PHONY: bench-refungible -bench-refungible: - make _bench PALLET=refungible - -.PHONY: bench-nonfungible -bench-nonfungible: - make _bench PALLET=nonfungible - -.PHONY: bench-structure -bench-structure: - make _bench PALLET=structure - -.PHONY: bench-foreign-assets -bench-foreign-assets: - make _bench PALLET=foreign-assets - -.PHONY: bench-collator-selection -bench-collator-selection: - make _bench PALLET=collator-selection - -.PHONY: bench-identity -bench-identity: - make _bench PALLET=identity - -.PHONY: bench-app-promotion -bench-app-promotion: - make _bench PALLET=app-promotion - -.PHONY: bench-maintenance -bench-maintenance: - make _bench PALLET=maintenance - -.PHONY: bench-xcm -bench-xcm: - make _bench PALLET=xcm OUTPUT=./runtime/common/weights/xcm.rs TEMPLATE="--template=.maintain/external-weight-template.hbs" + $(if $(4),$(4),--template=.maintain/frame-weight-template.hbs) --steps=50 --repeat=80 --heap-pages=4096 \ + --output=$$(if $(3),$(3),./pallets/$(if $(2),$(2),$(1))/src/weights.rs) +endef + +# _bench,pallet,(pallet_dir|),(output|),(extra|) +$(eval $(call _bench,evm-migration)) +$(eval $(call _bench,configuration)) +$(eval $(call _bench,common)) +$(eval $(call _bench,unique)) +$(eval $(call _bench,fungible)) +$(eval $(call _bench,refungible)) +$(eval $(call _bench,nonfungible)) +$(eval $(call _bench,structure)) +$(eval $(call _bench,foreign-assets)) +$(eval $(call _bench,collator-selection)) +$(eval $(call _bench,identity)) +$(eval $(call _bench,app-promotion)) +$(eval $(call 
_bench,maintenance)) +$(eval $(call _bench,xcm,,./runtime/common/weights/xcm.rs,"--template=.maintain/external-weights/template.hbs")) .PHONY: bench bench: bench-app-promotion bench-common bench-evm-migration bench-unique bench-structure bench-fungible bench-refungible bench-nonfungible bench-configuration bench-foreign-assets bench-maintenance bench-xcm bench-collator-selection bench-identity From 4275a8f9c1426994861dd112b2d918623f967f1e Mon Sep 17 00:00:00 2001 From: Grigoriy Simonov Date: Thu, 12 Oct 2023 13:09:15 +0000 Subject: [PATCH 118/143] fix: remove preimage benchmark --- node/cli/src/service.rs | 2 +- pallets/app-promotion/src/weights.rs | 432 ++++++++++++------------ pallets/maintenance/src/benchmarking.rs | 25 +- pallets/maintenance/src/lib.rs | 45 --- pallets/maintenance/src/weights.rs | 25 -- tests/src/maintenance.seqtest.ts | 81 ----- 6 files changed, 218 insertions(+), 392 deletions(-) diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 98a5d96b35..ce16688038 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -614,7 +614,7 @@ where para_id, collator_key: collator_key.expect("cli args do not allow this"), announce_block, - } + }, )?; } diff --git a/pallets/app-promotion/src/weights.rs b/pallets/app-promotion/src/weights.rs index c2cfd140bd..fc1a4ebba4 100644 --- a/pallets/app-promotion/src/weights.rs +++ b/pallets/app-promotion/src/weights.rs @@ -3,10 +3,10 @@ //! Autogenerated weights for pallet_app_promotion //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-12, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! HOSTNAME: `ubuntu-11`, CPU: `QEMU Virtual CPU version 2.5+` +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: // target/production/unique-collator @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/app-promotion/src/weights.rs @@ -48,185 +48,185 @@ pub trait WeightInfo { /// Weights for pallet_app_promotion using the Substrate node and recommended hardware. 
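// How to read the regenerated entries that follow: each function returns a base
// `Weight::from_parts(ref_time_in_picoseconds, proof_size)`, optionally a per-component slope
// added via `.saturating_add(Weight::from_parts(slope, ..).saturating_mul(b.into()))`, plus
// database access costs from the `DbWeight` `reads(..)` / `writes(..)` calls; the component
// `b` mirrors the `Linear<..>` range of the corresponding benchmark.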
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Maintenance Enabled (r:1 w:0) - /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:3 w:3) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:3 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: `Maintenance::Enabled` (r:1 w:0) + /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:3 w:3) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:3 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 3]`. fn on_initialize(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 4_107_000 picoseconds. - Weight::from_parts(4_751_973, 3622) - // Standard Error: 4_668 - .saturating_add(Weight::from_parts(10_570_330, 0).saturating_mul(b.into())) + // Minimum execution time: 6_031_000 picoseconds. + Weight::from_parts(6_880_848, 3622) + // Standard Error: 18_753 + .saturating_add(Weight::from_parts(22_907_186, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: AppPromotion Admin (r:0 w:1) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:0 w:1) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_admin_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_459_000 picoseconds. - Weight::from_parts(3_627_000, 0) + // Minimum execution time: 7_565_000 picoseconds. 
+ Weight::from_parts(7_795_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: ParachainSystem ValidationData (r:1 w:0) - /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AppPromotion PreviousCalculatedRecord (r:1 w:1) - /// Proof: AppPromotion PreviousCalculatedRecord (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:1001 w:1000) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: System Account (r:101 w:101) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:100 w:100) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:100 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AppPromotion::PreviousCalculatedRecord` (r:1 w:1) + /// Proof: `AppPromotion::PreviousCalculatedRecord` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:1001 w:1000) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:101 w:101) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:100 w:100) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:100 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 100]`. fn payout_stakers(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 73_245_000 picoseconds. 
- Weight::from_parts(74_196_000, 3593) - // Standard Error: 8_231 - .saturating_add(Weight::from_parts(49_090_053, 0).saturating_mul(b.into())) + // Minimum execution time: 146_577_000 picoseconds. + Weight::from_parts(147_970_000, 3593) + // Standard Error: 59_065 + .saturating_add(Weight::from_parts(115_527_092, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((12_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 25550).saturating_mul(b.into())) } - /// Storage: AppPromotion StakesPerAccount (r:1 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:1) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: ParachainSystem ValidationData (r:1 w:0) - /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AppPromotion Staked (r:1 w:1) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `AppPromotion::StakesPerAccount` (r:1 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AppPromotion::Staked` (r:1 w:1) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn stake() -> Weight { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 21_088_000 picoseconds. 
- Weight::from_parts(21_639_000, 4764) + // Minimum execution time: 46_889_000 picoseconds. + Weight::from_parts(47_549_000, 4764) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:11 w:10) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: AppPromotion StakesPerAccount (r:0 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:11 w:10) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::StakesPerAccount` (r:0 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn unstake_all() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 42_086_000 picoseconds. - Weight::from_parts(43_149_000, 29095) + // Minimum execution time: 63_069_000 picoseconds. 
+ Weight::from_parts(64_522_000, 29095) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:11 w:10) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: AppPromotion StakesPerAccount (r:1 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:11 w:10) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::StakesPerAccount` (r:1 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn unstake_partial() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 46_458_000 picoseconds. - Weight::from_parts(47_333_000, 29095) + // Minimum execution time: 84_649_000 picoseconds. + Weight::from_parts(86_173_000, 29095) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn sponsor_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 12_827_000 picoseconds. - Weight::from_parts(13_610_000, 4325) + // Minimum execution time: 24_396_000 picoseconds. 
+ Weight::from_parts(24_917_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn stop_sponsoring_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 11_899_000 picoseconds. - Weight::from_parts(12_303_000, 4325) + // Minimum execution time: 22_412_000 picoseconds. + Weight::from_parts(23_033_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EvmContractHelpers Sponsoring (r:0 w:1) - /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `EvmContractHelpers::Sponsoring` (r:0 w:1) + /// Proof: `EvmContractHelpers::Sponsoring` (`max_values`: None, `max_size`: Some(62), added: 2537, mode: `MaxEncodedLen`) fn sponsor_contract() -> Weight { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_226_000 picoseconds. - Weight::from_parts(10_549_000, 1517) + // Minimum execution time: 21_621_000 picoseconds. + Weight::from_parts(22_041_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EvmContractHelpers Sponsoring (r:1 w:1) - /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `EvmContractHelpers::Sponsoring` (r:1 w:1) + /// Proof: `EvmContractHelpers::Sponsoring` (`max_values`: None, `max_size`: Some(62), added: 2537, mode: `MaxEncodedLen`) fn stop_sponsoring_contract() -> Weight { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 10_528_000 picoseconds. - Weight::from_parts(10_842_000, 3527) + // Minimum execution time: 19_186_000 picoseconds. 
+ Weight::from_parts(19_616_000, 3527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -234,185 +234,185 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Maintenance Enabled (r:1 w:0) - /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:3 w:3) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:3 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: `Maintenance::Enabled` (r:1 w:0) + /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:3 w:3) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:3 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 3]`. fn on_initialize(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 4_107_000 picoseconds. - Weight::from_parts(4_751_973, 3622) - // Standard Error: 4_668 - .saturating_add(Weight::from_parts(10_570_330, 0).saturating_mul(b.into())) + // Minimum execution time: 6_031_000 picoseconds. + Weight::from_parts(6_880_848, 3622) + // Standard Error: 18_753 + .saturating_add(Weight::from_parts(22_907_186, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: AppPromotion Admin (r:0 w:1) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:0 w:1) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_admin_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_459_000 picoseconds. - Weight::from_parts(3_627_000, 0) + // Minimum execution time: 7_565_000 picoseconds. 
+ Weight::from_parts(7_795_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: ParachainSystem ValidationData (r:1 w:0) - /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AppPromotion PreviousCalculatedRecord (r:1 w:1) - /// Proof: AppPromotion PreviousCalculatedRecord (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:1001 w:1000) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: System Account (r:101 w:101) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:100 w:100) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:100 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AppPromotion::PreviousCalculatedRecord` (r:1 w:1) + /// Proof: `AppPromotion::PreviousCalculatedRecord` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:1001 w:1000) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:101 w:101) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:100 w:100) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:100 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 100]`. fn payout_stakers(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 73_245_000 picoseconds. 
- Weight::from_parts(74_196_000, 3593) - // Standard Error: 8_231 - .saturating_add(Weight::from_parts(49_090_053, 0).saturating_mul(b.into())) + // Minimum execution time: 146_577_000 picoseconds. + Weight::from_parts(147_970_000, 3593) + // Standard Error: 59_065 + .saturating_add(Weight::from_parts(115_527_092, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((12_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 25550).saturating_mul(b.into())) } - /// Storage: AppPromotion StakesPerAccount (r:1 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:1) - /// Proof: Balances Freezes (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:0) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: ParachainSystem ValidationData (r:1 w:0) - /// Proof Skipped: ParachainSystem ValidationData (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AppPromotion Staked (r:1 w:1) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `AppPromotion::StakesPerAccount` (r:1 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AppPromotion::Staked` (r:1 w:1) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn stake() -> Weight { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 21_088_000 picoseconds. 
- Weight::from_parts(21_639_000, 4764) + // Minimum execution time: 46_889_000 picoseconds. + Weight::from_parts(47_549_000, 4764) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:11 w:10) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: AppPromotion StakesPerAccount (r:0 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:11 w:10) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::StakesPerAccount` (r:0 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn unstake_all() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 42_086_000 picoseconds. - Weight::from_parts(43_149_000, 29095) + // Minimum execution time: 63_069_000 picoseconds. 
+ Weight::from_parts(64_522_000, 29095) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:1 w:0) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) - /// Storage: AppPromotion PendingUnstake (r:1 w:1) - /// Proof: AppPromotion PendingUnstake (max_values: None, max_size: Some(157), added: 2632, mode: MaxEncodedLen) - /// Storage: AppPromotion Staked (r:11 w:10) - /// Proof: AppPromotion Staked (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) - /// Storage: AppPromotion TotalStaked (r:1 w:1) - /// Proof: AppPromotion TotalStaked (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: AppPromotion StakesPerAccount (r:1 w:1) - /// Proof: AppPromotion StakesPerAccount (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:1 w:0) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::PendingUnstake` (r:1 w:1) + /// Proof: `AppPromotion::PendingUnstake` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::Staked` (r:11 w:10) + /// Proof: `AppPromotion::Staked` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::TotalStaked` (r:1 w:1) + /// Proof: `AppPromotion::TotalStaked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `AppPromotion::StakesPerAccount` (r:1 w:1) + /// Proof: `AppPromotion::StakesPerAccount` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn unstake_partial() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 46_458_000 picoseconds. - Weight::from_parts(47_333_000, 29095) + // Minimum execution time: 84_649_000 picoseconds. + Weight::from_parts(86_173_000, 29095) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn sponsor_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 12_827_000 picoseconds. - Weight::from_parts(13_610_000, 4325) + // Minimum execution time: 24_396_000 picoseconds. 
+ Weight::from_parts(24_917_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn stop_sponsoring_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 11_899_000 picoseconds. - Weight::from_parts(12_303_000, 4325) + // Minimum execution time: 22_412_000 picoseconds. + Weight::from_parts(23_033_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EvmContractHelpers Sponsoring (r:0 w:1) - /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `EvmContractHelpers::Sponsoring` (r:0 w:1) + /// Proof: `EvmContractHelpers::Sponsoring` (`max_values`: None, `max_size`: Some(62), added: 2537, mode: `MaxEncodedLen`) fn sponsor_contract() -> Weight { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_226_000 picoseconds. - Weight::from_parts(10_549_000, 1517) + // Minimum execution time: 21_621_000 picoseconds. + Weight::from_parts(22_041_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AppPromotion Admin (r:1 w:0) - /// Proof: AppPromotion Admin (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EvmContractHelpers Sponsoring (r:1 w:1) - /// Proof: EvmContractHelpers Sponsoring (max_values: None, max_size: Some(62), added: 2537, mode: MaxEncodedLen) + /// Storage: `AppPromotion::Admin` (r:1 w:0) + /// Proof: `AppPromotion::Admin` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `EvmContractHelpers::Sponsoring` (r:1 w:1) + /// Proof: `EvmContractHelpers::Sponsoring` (`max_values`: None, `max_size`: Some(62), added: 2537, mode: `MaxEncodedLen`) fn stop_sponsoring_contract() -> Weight { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 10_528_000 picoseconds. - Weight::from_parts(10_842_000, 3527) + // Minimum execution time: 19_186_000 picoseconds. 
+ Weight::from_parts(19_616_000, 3527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/maintenance/src/benchmarking.rs b/pallets/maintenance/src/benchmarking.rs index 4dba0cf6fb..a26ee5f16f 100644 --- a/pallets/maintenance/src/benchmarking.rs +++ b/pallets/maintenance/src/benchmarking.rs @@ -15,9 +15,8 @@ // along with Unique Network. If not, see . use frame_benchmarking::v2::*; -use frame_support::{ensure, pallet_prelude::Weight, traits::StorePreimage}; +use frame_support::ensure; use frame_system::RawOrigin; -use parity_scale_codec::Encode; use sp_std::vec; use super::*; @@ -48,26 +47,4 @@ mod benchmarks { Ok(()) } - - // TODO: fix - // #[pov_mode = MaxEncodedLen { - // // PoV size is deducted from weight_bound - // Preimage::PreimageFor: Measured - // }] - #[benchmark] - fn execute_preimage() -> Result<(), BenchmarkError> { - let call = ::RuntimeCall::from(frame_system::Call::::remark { - remark: 1u32.encode(), - }); - let hash = T::Preimages::note(call.encode().into())?; - - #[extrinsic_call] - _( - RawOrigin::Root, - hash, - Weight::from_parts(100000000000, 100000000000), - ); - - Ok(()) - } } diff --git a/pallets/maintenance/src/lib.rs b/pallets/maintenance/src/lib.rs index 51cb9e6212..91ef821b2e 100644 --- a/pallets/maintenance/src/lib.rs +++ b/pallets/maintenance/src/lib.rs @@ -32,7 +32,6 @@ pub mod pallet { traits::{EnsureOrigin, QueryPreimage, StorePreimage}, }; use frame_system::pallet_prelude::*; - use sp_core::H256; use sp_runtime::traits::Dispatchable; use crate::weights::WeightInfo; @@ -103,49 +102,5 @@ pub mod pallet { Ok(()) } - - /// Execute a runtime call stored as a preimage. - /// - /// `weight_bound` is the maximum weight that the caller is willing - /// to allow the extrinsic to be executed with. - #[pallet::call_index(2)] - #[pallet::weight(::WeightInfo::execute_preimage() + *weight_bound)] - pub fn execute_preimage( - origin: OriginFor, - hash: H256, - weight_bound: Weight, - ) -> DispatchResultWithPostInfo { - use parity_scale_codec::Decode; - - T::PreimageOrigin::ensure_origin(origin.clone())?; - - let data = T::Preimages::fetch(&hash, None)?; - weight_bound.set_proof_size( - weight_bound - .proof_size() - .checked_sub( - data.len() - .try_into() - .map_err(|_| DispatchError::Corruption)?, - ) - .ok_or(DispatchError::Exhausted)?, - ); - - let call = ::RuntimeCall::decode(&mut &data[..]) - .map_err(|_| DispatchError::Corruption)?; - - ensure!( - call.get_dispatch_info().weight.all_lte(weight_bound), - DispatchError::Exhausted - ); - - match call.dispatch(origin) { - Ok(_) => Ok(Pays::No.into()), - Err(error_and_info) => Err(DispatchErrorWithPostInfo { - post_info: Pays::No.into(), - error: error_and_info.error, - }), - } - } } } diff --git a/pallets/maintenance/src/weights.rs b/pallets/maintenance/src/weights.rs index 526fa8cc12..82fd8286ea 100644 --- a/pallets/maintenance/src/weights.rs +++ b/pallets/maintenance/src/weights.rs @@ -35,7 +35,6 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn enable() -> Weight; fn disable() -> Weight; - fn execute_preimage() -> Weight; } /// Weights for pallet_maintenance using the Substrate node and recommended hardware. 
@@ -61,18 +60,6 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(3_111_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:0) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - fn execute_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `209` - // Estimated: `3674` - // Minimum execution time: 7_359_000 picoseconds. - Weight::from_parts(7_613_000, 3674) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } } // For backwards compatibility and tests @@ -97,17 +84,5 @@ impl WeightInfo for () { Weight::from_parts(3_111_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:0) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - fn execute_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `209` - // Estimated: `3674` - // Minimum execution time: 7_359_000 picoseconds. - Weight::from_parts(7_613_000, 3674) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } } diff --git a/tests/src/maintenance.seqtest.ts b/tests/src/maintenance.seqtest.ts index fb8d795d61..3d8fa7ff5b 100644 --- a/tests/src/maintenance.seqtest.ts +++ b/tests/src/maintenance.seqtest.ts @@ -283,87 +283,6 @@ describe('Integration Test: Maintenance Functionality', () => { }); }); - describe('Preimage Execution', () => { - const preimageHashes: string[] = []; - - before(async function() { - await usingPlaygrounds(async (helper) => { - requirePalletsOrSkip(this, helper, [Pallets.Preimage, Pallets.Maintenance]); - - // create a preimage to be operated with in the tests - const randomAccounts = await helper.arrange.createCrowd(10, 0n, superuser); - const randomIdentities = randomAccounts.map((acc, i) => [ - acc.address, { - deposit: 0n, - judgements: [], - info: { - display: { - raw: `Random Account #${i}`, - }, - }, - }, - ]); - const preimage = helper.constructApiCall('api.tx.identity.forceInsertIdentities', [randomIdentities]).method.toHex(); - preimageHashes.push(await helper.preimage.notePreimage(bob, preimage, true)); - }); - }); - - itSub('Successfully executes call in a preimage', async ({helper}) => { - const result = await expect(helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.executePreimage', [ - preimageHashes[0], {refTime: 10000000000, proofSize: 10000}, - ])).to.be.fulfilled; - - // preimage is executed, and an appropriate event is present - const events = result.result.events.filter((x: any) => x.event.method === 'IdentitiesInserted' && x.event.section === 'identity'); - expect(events.length).to.be.equal(1); - - // the preimage goes back to being unrequested - expect(await helper.preimage.getPreimageInfo(preimageHashes[0])).to.have.property('unrequested'); - }); - - itSub('Does not allow execution of a preimage that would fail', async ({helper}) => { - const [zeroAccount] = await helper.arrange.createAccounts([0n], superuser); - - const preimage = helper.constructApiCall('api.tx.balances.forceTransfer', [ - {Id: zeroAccount.address}, {Id: superuser.address}, 1000n, - ]).method.toHex(); - const preimageHash = await 
helper.preimage.notePreimage(bob, preimage, true); - preimageHashes.push(preimageHash); - - await expect(helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.executePreimage', [ - preimageHash, {refTime: 10000000000, proofSize: 10000}, - ])).to.be.rejectedWith(/^Token: FundsUnavailable$/); - }); - - itSub('Does not allow preimage execution with non-root', async ({helper}) => { - await expect(helper.executeExtrinsic(bob, 'api.tx.maintenance.executePreimage', [ - preimageHashes[0], {refTime: 10000000000, proofSize: 10000}, - ])).to.be.rejectedWith(/^Misc: BadOrigin$/); - }); - - itSub('Does not allow execution of non-existent preimages', async ({helper}) => { - await expect(helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.executePreimage', [ - '0x1010101010101010101010101010101010101010101010101010101010101010', {refTime: 10000000000, proofSize: 10000}, - ])).to.be.rejectedWith(/^Misc: Unavailable$/); - }); - - itSub('Does not allow preimage execution with less than minimum weights', async ({helper}) => { - await expect(helper.getSudo().executeExtrinsic(superuser, 'api.tx.maintenance.executePreimage', [ - preimageHashes[0], {refTime: 1000, proofSize: 100}, - ])).to.be.rejectedWith(/^Misc: Exhausted$/); - }); - - after(async function() { - await usingPlaygrounds(async (helper) => { - if(helper.fetchMissingPalletNames([Pallets.Preimage, Pallets.Maintenance]).length != 0) return; - - for(const hash of preimageHashes) { - await helper.preimage.unnotePreimage(bob, hash); - } - }); - }); - }); - describe('Integration Test: Maintenance mode & App Promo', () => { let superuser: IKeyringPair; From 437764b749e77be70fb1d13b070030bac87551e7 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 5 Oct 2023 21:17:44 +0200 Subject: [PATCH 119/143] refactor: property writer / fix set_token_props weight chore: temporary bench common/nft/rft fix: std features for up-common refactor: account nesting_budget in common weights chore: revert nesting_budget logic fix: nesting_budget weight --- pallets/balances-adapter/src/common.rs | 30 - pallets/common/src/benchmarking.rs | 14 +- pallets/common/src/lib.rs | 579 ++++++++++--------- pallets/common/src/weights.rs | 60 +- pallets/evm-coder-substrate/src/lib.rs | 2 +- pallets/foreign-assets/src/impl_fungibles.rs | 6 +- pallets/fungible/src/common.rs | 55 +- pallets/fungible/src/erc.rs | 90 +-- pallets/nonfungible/src/benchmarking.rs | 77 ++- pallets/nonfungible/src/common.rs | 52 +- pallets/nonfungible/src/erc.rs | 137 ++--- pallets/nonfungible/src/lib.rs | 51 +- pallets/nonfungible/src/weights.rs | 241 ++++---- pallets/refungible/src/benchmarking.rs | 80 ++- pallets/refungible/src/common.rs | 67 +-- pallets/refungible/src/erc.rs | 180 +++--- pallets/refungible/src/erc_token.rs | 91 +-- pallets/refungible/src/lib.rs | 4 +- pallets/refungible/src/weights.rs | 297 +++++----- pallets/structure/src/lib.rs | 22 +- pallets/unique/Cargo.toml | 4 + pallets/unique/src/lib.rs | 105 +++- primitives/common/src/constants.rs | 1 + primitives/data-structs/src/budget.rs | 4 +- runtime/common/config/pallets/mod.rs | 1 + runtime/common/runtime_apis.rs | 2 +- runtime/common/weights/mod.rs | 20 +- 27 files changed, 1171 insertions(+), 1101 deletions(-) diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index c81a837919..b03a95124b 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -30,18 +30,10 @@ impl CommonWeightInfo for CommonWeights { Weight::default() } - fn 
delete_collection_properties(_amount: u32) -> Weight { - Weight::default() - } - fn set_token_properties(_amount: u32) -> Weight { Weight::default() } - fn delete_token_properties(_amount: u32) -> Weight { - Weight::default() - } - fn set_token_property_permissions(_amount: u32) -> Weight { Weight::default() } @@ -66,18 +58,6 @@ impl CommonWeightInfo for CommonWeights { Weight::default() } - fn burn_recursively_self_raw() -> Weight { - Weight::default() - } - - fn burn_recursively_breadth_raw(_amount: u32) -> Weight { - Weight::default() - } - - fn token_owner() -> Weight { - Weight::default() - } - fn set_allowance_for_all() -> Weight { Weight::default() } @@ -128,16 +108,6 @@ impl CommonCollectionOperations for NativeFungibleHandle { fail!(>::UnsupportedOperation); } - fn burn_item_recursively( - &self, - _sender: ::CrossAccountId, - _token: TokenId, - _self_budget: &dyn up_data_structs::budget::Budget, - _breadth_budget: &dyn up_data_structs::budget::Budget, - ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); - } - fn set_collection_properties( &self, _sender: ::CrossAccountId, diff --git a/pallets/common/src/benchmarking.rs b/pallets/common/src/benchmarking.rs index 1d013e552e..1945de7469 100644 --- a/pallets/common/src/benchmarking.rs +++ b/pallets/common/src/benchmarking.rs @@ -34,7 +34,7 @@ use up_data_structs::{ MAX_TOKEN_PREFIX_LENGTH, }; -use crate::{CollectionHandle, Config, Pallet}; +use crate::{BenchmarkPropertyWriter, CollectionHandle, Config, Pallet}; const SEED: u32 = 1; @@ -126,16 +126,6 @@ fn create_collection( ) } -pub fn load_is_admin_and_property_permissions( - collection: &CollectionHandle, - sender: &T::CrossAccountId, -) -> (bool, PropertiesPermissionMap) { - ( - collection.is_owner_or_admin(sender), - >::property_permissions(collection.id), - ) -} - /// Helper macros, which handles all benchmarking preparation in semi-declarative way /// /// `name` is a substrate account @@ -272,7 +262,7 @@ mod benchmarks { #[block] { - load_is_admin_and_property_permissions(&collection, &sender); + >::load_collection_info(&&collection, &sender); } Ok(()) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index 23d605497e..8248a84329 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -872,7 +872,7 @@ pub mod pallet { } /// Value representation with delayed initialization time. -pub struct LazyValue T> { +pub struct LazyValue { value: Option, f: Option, } @@ -1902,7 +1902,9 @@ pub trait CommonWeightInfo { /// Collection property deletion weight. /// /// * `amount`- The number of properties to set. - fn delete_collection_properties(amount: u32) -> Weight; + fn delete_collection_properties(amount: u32) -> Weight { + Self::set_collection_properties(amount) + } /// Token property setting weight. /// @@ -1912,7 +1914,9 @@ pub trait CommonWeightInfo { /// Token property deletion weight. /// /// * `amount`- The number of properties to delete. - fn delete_token_properties(amount: u32) -> Weight; + fn delete_token_properties(amount: u32) -> Weight { + Self::set_token_properties(amount) + } /// Token property permissions set weight. /// @@ -1934,30 +1938,6 @@ pub trait CommonWeightInfo { /// The price of burning a token from another user. fn burn_from() -> Weight; - /// Differs from burn_item in case of Fungible and Refungible, as it should burn - /// whole users's balance. 
- /// - /// This method shouldn't be used directly, as it doesn't count breadth price, use [burn_recursively](CommonWeightInfo::burn_recursively) instead - fn burn_recursively_self_raw() -> Weight; - - /// Cost of iterating over `amount` children while burning, without counting child burning itself. - /// - /// This method shouldn't be used directly, as it doesn't count depth price, use [burn_recursively](CommonWeightInfo::burn_recursively) instead - fn burn_recursively_breadth_raw(amount: u32) -> Weight; - - /// The price of recursive burning a token. - /// - /// `max_selfs` - The maximum burning weight of the token itself. - /// `max_breadth` - The maximum number of nested tokens to burn. - fn burn_recursively(max_selfs: u32, max_breadth: u32) -> Weight { - Self::burn_recursively_self_raw() - .saturating_mul(max_selfs.max(1) as u64) - .saturating_add(Self::burn_recursively_breadth_raw(max_breadth)) - } - - /// The price of retrieving token owner - fn token_owner() -> Weight; - /// The price of setting approval for all fn set_allowance_for_all() -> Weight; @@ -2029,20 +2009,6 @@ pub trait CommonCollectionOperations { amount: u128, ) -> DispatchResultWithPostInfo; - /// Burn token and all nested tokens recursievly. - /// - /// * `sender` - The user who owns the token. - /// * `token` - Token id that will burned. - /// * `self_budget` - The budget that can be spent on burning tokens. - /// * `breadth_budget` - The budget that can be spent on burning nested tokens. - fn burn_item_recursively( - &self, - sender: T::CrossAccountId, - token: TokenId, - self_budget: &dyn Budget, - breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo; - /// Set collection properties. /// /// * `sender` - Must be either the owner of the collection or its admin. @@ -2374,160 +2340,45 @@ impl From for Error { } } -/// A marker structure that enables the writer implementation -/// to provide the interface to write properties to **newly created** tokens. -pub struct NewTokenPropertyWriter; - -/// A marker structure that enables the writer implementation -/// to provide the interface to write properties to **already existing** tokens. -pub struct ExistingTokenPropertyWriter; - /// The type-safe interface for writing properties (setting or deleting) to tokens. /// It has two distinct implementations for newly created tokens and existing ones. /// /// This type utilizes the lazy evaluation to avoid repeating the computation /// of several performance-heavy or PoV-heavy tasks, /// such as checking the indirect ownership or reading the token property permissions. 
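Illustrative aside, not lines of this patch: the refactor above keeps the lazy-initialization idea, where `LazyValue` stores the value and its initializer as two `Option`s so that expensive checks such as `is_owner_or_admin` or loading `property_permissions` run at most once per call. Below is a minimal stand-alone sketch of the same compute-once pattern; the names `Lazy` and `main` are hypothetical and do not exist in the pallet.

struct Lazy<T, F: FnOnce() -> T> {
    value: Option<T>,
    init: Option<F>,
}

impl<T, F: FnOnce() -> T> Lazy<T, F> {
    fn new(init: F) -> Self {
        Self { value: None, init: Some(init) }
    }

    // Runs the closure on first access (think: an expensive storage read)
    // and hands back the cached value on every later access.
    fn value(&mut self) -> &T {
        if self.value.is_none() {
            let init = self.init.take().expect("initializer is consumed exactly once");
            self.value = Some(init());
        }
        self.value.as_ref().expect("value was just initialized")
    }
}

fn main() {
    let mut is_admin = Lazy::new(|| {
        println!("expensive check runs only once");
        true
    });
    assert!(*is_admin.value());
    assert!(*is_admin.value()); // the second access reuses the cached result
}

The pallet's `PropertyWriter` carries such values grouped into `PropertyWriterLazyCollectionInfo` (admin flag, property permissions) and `PropertyWriterLazyTokenInfo` (existence, ownership, stored properties) instead of materializing them up front.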
-pub struct PropertyWriter< - 'a, - T, - Handle, - WriterVariant, - FIsAdmin, - FPropertyPermissions, - FCheckTokenExist, - FGetProperties, -> where - T: Config, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, -{ +pub struct PropertyWriter<'a, WriterVariant, T, Handle, FIsAdmin, FPropertyPermissions> { collection: &'a Handle, - is_collection_admin: LazyValue, - property_permissions: LazyValue, - check_token_exist: FCheckTokenExist, - get_properties: FGetProperties, + collection_lazy_info: PropertyWriterLazyCollectionInfo, _phantom: PhantomData<(T, WriterVariant)>, } -impl<'a, T, Handle, FIsAdmin, FPropertyPermissions, FCheckTokenExist, FGetProperties> - PropertyWriter< - 'a, - T, - Handle, - NewTokenPropertyWriter, - FIsAdmin, - FPropertyPermissions, - FCheckTokenExist, - FGetProperties, - > where - T: Config, - Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, - FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, - FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, -{ - /// A function to write properties to a **newly created** token. - pub fn write_token_properties( - &mut self, - mint_target_is_sender: bool, - token_id: TokenId, - properties_updates: impl Iterator, - log: evm_coder::ethereum::Log, - ) -> DispatchResult { - self.internal_write_token_properties( - token_id, - properties_updates.map(|p| (p.key, Some(p.value))), - |_| Ok(mint_target_is_sender), - log, - ) - } -} - -impl<'a, T, Handle, FIsAdmin, FPropertyPermissions, FCheckTokenExist, FGetProperties> - PropertyWriter< - 'a, - T, - Handle, - ExistingTokenPropertyWriter, - FIsAdmin, - FPropertyPermissions, - FCheckTokenExist, - FGetProperties, - > where - T: Config, - Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, - FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, - FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, -{ - /// A function to write properties to an **already existing** token. 
- pub fn write_token_properties( - &mut self, - sender: &T::CrossAccountId, - token_id: TokenId, - properties_updates: impl Iterator)>, - nesting_budget: &dyn Budget, - log: evm_coder::ethereum::Log, - ) -> DispatchResult { - self.internal_write_token_properties( - token_id, - properties_updates, - |collection| collection.check_token_indirect_owner(token_id, sender, nesting_budget), - log, - ) - } -} - -impl< - 'a, - T, - Handle, - WriterVariant, - FIsAdmin, - FPropertyPermissions, - FCheckTokenExist, - FGetProperties, - > - PropertyWriter< - 'a, - T, - Handle, - WriterVariant, - FIsAdmin, - FPropertyPermissions, - FCheckTokenExist, - FGetProperties, - > where +impl<'a, T, Handle, WriterVariant, FIsAdmin, FPropertyPermissions> + PropertyWriter<'a, WriterVariant, T, Handle, FIsAdmin, FPropertyPermissions> +where T: Config, Handle: CommonCollectionOperations + Deref>, FIsAdmin: FnOnce() -> bool, FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, - FCheckTokenExist: Copy + FnOnce(TokenId) -> bool, - FGetProperties: Copy + FnOnce(TokenId) -> TokenProperties, { - fn internal_write_token_properties( + fn internal_write_token_properties( &mut self, token_id: TokenId, + mut token_lazy_info: PropertyWriterLazyTokenInfo< + FCheckTokenExist, + FCheckTokenOwner, + FGetProperties, + >, properties_updates: impl Iterator)>, - check_token_owner: FCheckTokenOwner, log: evm_coder::ethereum::Log, ) -> DispatchResult where - FCheckTokenOwner: FnOnce(&Handle) -> Result, + FCheckTokenExist: FnOnce() -> bool, + FCheckTokenOwner: FnOnce() -> Result, + FGetProperties: FnOnce() -> TokenProperties, { - let get_properties = self.get_properties; - let mut stored_properties = LazyValue::new(move || get_properties(token_id)); - - let mut is_token_owner = LazyValue::new(|| check_token_owner(self.collection)); - - let check_token_exist = self.check_token_exist; - let mut is_token_exist = LazyValue::new(move || check_token_exist(token_id)); - for (key, value) in properties_updates { let permission = self + .collection_lazy_info .property_permissions .value() .get(&key) @@ -2536,7 +2387,11 @@ impl< match permission { PropertyPermission { mutable: false, .. } - if stored_properties.value().get(&key).is_some() => + if token_lazy_info + .stored_properties + .value() + .get(&key) + .is_some() => { return Err(>::NoPermission.into()); } @@ -2548,15 +2403,16 @@ impl< } => check_token_permissions::( collection_admin, token_owner, - &mut self.is_collection_admin, - &mut is_token_owner, - &mut is_token_exist, + &mut self.collection_lazy_info.is_collection_admin, + &mut token_lazy_info.is_token_owner, + &mut token_lazy_info.is_token_exist, )?, } match value { Some(value) => { - stored_properties + token_lazy_info + .stored_properties .value_mut() .try_set(key.clone(), value) .map_err(>::from)?; @@ -2568,7 +2424,8 @@ impl< )); } None => { - stored_properties + token_lazy_info + .stored_properties .value_mut() .remove(&key) .map_err(>::from)?; @@ -2582,142 +2439,330 @@ impl< } } - let properties_changed = stored_properties.has_value(); + let properties_changed = token_lazy_info.stored_properties.has_value(); if properties_changed { >::deposit_log(log); self.collection - .set_token_properties_raw(token_id, stored_properties.into_inner()); + .set_token_properties_raw(token_id, token_lazy_info.stored_properties.into_inner()); } Ok(()) } } -/// Create a [`PropertyWriter`] for newly created tokens. 
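Illustrative aside, not lines of this patch: the permission loop in `internal_write_token_properties` above first rejects a rewrite of an immutable key that is already stored, and only then calls `check_token_permissions` with the lazily evaluated admin and owner flags. The sketch below is a simplified, hypothetical stand-in (`may_write` and the role check are assumptions, since the body of `check_token_permissions` is not part of this hunk); it assumes a write is granted when the caller matches at least one enabled role.

// Hypothetical, simplified analogue of the permission flags used above.
#[derive(Clone, Copy, Default)]
struct PropertyPermission {
    mutable: bool,
    collection_admin: bool,
    token_owner: bool,
}

// Mirrors the shape of the loop: immutable-and-already-set fails first,
// then the caller has to satisfy at least one of the enabled roles.
fn may_write(
    permission: PropertyPermission,
    already_set: bool,
    caller_is_collection_admin: bool,
    caller_is_token_owner: bool,
) -> Result<(), &'static str> {
    if !permission.mutable && already_set {
        return Err("NoPermission: immutable property is already set");
    }
    let allowed = (permission.collection_admin && caller_is_collection_admin)
        || (permission.token_owner && caller_is_token_owner);
    if allowed {
        Ok(())
    } else {
        Err("NoPermission: caller matches no enabled role")
    }
}

fn main() {
    let perm = PropertyPermission { mutable: false, collection_admin: true, token_owner: true };
    assert!(may_write(perm, false, false, true).is_ok()); // first write by the token owner
    assert!(may_write(perm, true, true, true).is_err()); // an immutable key cannot be rewritten
}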
-pub fn property_writer_for_new_token<'a, T, Handle>( - collection: &'a Handle, - sender: &'a T::CrossAccountId, -) -> PropertyWriter< - 'a, - T, - Handle, - NewTokenPropertyWriter, - impl FnOnce() -> bool + 'a, - impl FnOnce() -> PropertiesPermissionMap + 'a, - impl Copy + FnOnce(TokenId) -> bool + 'a, - impl Copy + FnOnce(TokenId) -> TokenProperties + 'a, -> +/// A helper structure for the [`PropertyWriter`] that holds +/// the collection-related info. The info is loaded using lazy evaluation. +/// This info is common for any token for which we write properties. +pub struct PropertyWriterLazyCollectionInfo { + is_collection_admin: LazyValue, + property_permissions: LazyValue, +} + +/// A helper structure for the [`PropertyWriter`] that holds +/// the token-related info. The info is loaded using lazy evaluation. +pub struct PropertyWriterLazyTokenInfo { + is_token_exist: LazyValue, + is_token_owner: LazyValue, FCheckTokenOwner>, + stored_properties: LazyValue, +} + +impl + PropertyWriterLazyTokenInfo +where + FCheckTokenExist: FnOnce() -> bool, + FCheckTokenOwner: FnOnce() -> Result, + FGetProperties: FnOnce() -> TokenProperties, +{ + /// Create a lazy token info. + pub fn new( + check_token_exist: FCheckTokenExist, + check_token_owner: FCheckTokenOwner, + get_token_properties: FGetProperties, + ) -> Self { + Self { + is_token_exist: LazyValue::new(check_token_exist), + is_token_owner: LazyValue::new(check_token_owner), + stored_properties: LazyValue::new(get_token_properties), + } + } +} + +/// A marker structure that enables the writer implementation +/// to provide the interface to write properties to **newly created** tokens. +pub struct NewTokenPropertyWriter(PhantomData); +impl NewTokenPropertyWriter { + /// Creates a [`PropertyWriter`] for **newly created** tokens. + pub fn new<'a, Handle>( + collection: &'a Handle, + sender: &'a T::CrossAccountId, + ) -> PropertyWriter< + 'a, + Self, + T, + Handle, + impl FnOnce() -> bool + 'a, + impl FnOnce() -> PropertiesPermissionMap + 'a, + > + where + T: Config, + Handle: CommonCollectionOperations + Deref>, + { + PropertyWriter { + collection, + collection_lazy_info: PropertyWriterLazyCollectionInfo { + is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), + property_permissions: LazyValue::new(|| { + >::property_permissions(collection.id) + }), + }, + _phantom: PhantomData, + } + } +} + +impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> + PropertyWriter<'a, NewTokenPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> where T: Config, Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { - PropertyWriter { - collection, - is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), - property_permissions: LazyValue::new(|| >::property_permissions(collection.id)), - check_token_exist: |token_id| { - debug_assert!(collection.token_exists(token_id)); + /// A function to write properties to a **newly created** token. 
+ pub fn write_token_properties( + &mut self, + mint_target_is_sender: bool, + token_id: TokenId, + properties_updates: impl Iterator, + log: evm_coder::ethereum::Log, + ) -> DispatchResult { + let check_token_exist = || { + debug_assert!(self.collection.token_exists(token_id)); true - }, - get_properties: |token_id| { - debug_assert!(collection.get_token_properties_raw(token_id).is_none()); + }; + + let check_token_owner = || Ok(mint_target_is_sender); + + let get_token_properties = || { + debug_assert!(self.collection.get_token_properties_raw(token_id).is_none()); TokenProperties::new() - }, - _phantom: PhantomData, + }; + + self.internal_write_token_properties( + token_id, + PropertyWriterLazyTokenInfo::new( + check_token_exist, + check_token_owner, + get_token_properties, + ), + properties_updates.map(|p| (p.key, Some(p.value))), + log, + ) } } -#[cfg(feature = "runtime-benchmarks")] -/// Create a `PropertyWriter` with preloaded `is_collection_admin` and `property_permissions. -/// Also: -/// * it will return `true` for the token ownership check. -/// * it will return empty stored properties without reading them from the storage. -pub fn collection_info_loaded_property_writer( - collection: &Handle, - is_collection_admin: bool, - property_permissions: PropertiesPermissionMap, -) -> PropertyWriter< - T, - Handle, - NewTokenPropertyWriter, - impl FnOnce() -> bool, - impl FnOnce() -> PropertiesPermissionMap, - impl Copy + FnOnce(TokenId) -> bool, - impl Copy + FnOnce(TokenId) -> TokenProperties, -> +/// A marker structure that enables the writer implementation +/// to provide the interface to write properties to **already existing** tokens. +pub struct ExistingTokenPropertyWriter(PhantomData); +impl ExistingTokenPropertyWriter { + /// Creates a [`PropertyWriter`] for **already existing** tokens. + pub fn new<'a, Handle>( + collection: &'a Handle, + sender: &'a T::CrossAccountId, + ) -> PropertyWriter< + 'a, + Self, + T, + Handle, + impl FnOnce() -> bool + 'a, + impl FnOnce() -> PropertiesPermissionMap + 'a, + > + where + Handle: CommonCollectionOperations + Deref>, + { + PropertyWriter { + collection, + collection_lazy_info: PropertyWriterLazyCollectionInfo { + is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), + property_permissions: LazyValue::new(|| { + >::property_permissions(collection.id) + }), + }, + _phantom: PhantomData, + } + } +} + +impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> + PropertyWriter<'a, ExistingTokenPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> where T: Config, Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { - PropertyWriter { - collection, - is_collection_admin: LazyValue::new(move || is_collection_admin), - property_permissions: LazyValue::new(move || property_permissions), - check_token_exist: |_token_id| true, - get_properties: |_token_id| TokenProperties::new(), - _phantom: PhantomData, + /// A function to write properties to an **already existing** token. 
+ pub fn write_token_properties( + &mut self, + sender: &T::CrossAccountId, + token_id: TokenId, + properties_updates: impl Iterator)>, + nesting_budget: &dyn Budget, + log: evm_coder::ethereum::Log, + ) -> DispatchResult { + let check_token_exist = || self.collection.token_exists(token_id); + let check_token_owner = || { + self.collection + .check_token_indirect_owner(token_id, sender, nesting_budget) + }; + let get_token_properties = || { + self.collection + .get_token_properties_raw(token_id) + .unwrap_or_default() + }; + + self.internal_write_token_properties( + token_id, + PropertyWriterLazyTokenInfo::new( + check_token_exist, + check_token_owner, + get_token_properties, + ), + properties_updates, + log, + ) } } -/// Create a [`PropertyWriter`] for already existing tokens. -pub fn property_writer_for_existing_token<'a, T, Handle>( - collection: &'a Handle, - sender: &'a T::CrossAccountId, -) -> PropertyWriter< - 'a, - T, - Handle, - ExistingTokenPropertyWriter, - impl FnOnce() -> bool + 'a, - impl FnOnce() -> PropertiesPermissionMap + 'a, - impl Copy + FnOnce(TokenId) -> bool + 'a, - impl Copy + FnOnce(TokenId) -> TokenProperties + 'a, -> +/// A marker structure that enables the writer implementation +/// to benchmark the token properties writing. +#[cfg(feature = "runtime-benchmarks")] +pub struct BenchmarkPropertyWriter(PhantomData); + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkPropertyWriter { + /// Creates a [`PropertyWriter`] for benchmarking tokens properties writing. + pub fn new<'a, Handle, FIsAdmin, FPropertyPermissions>( + collection: &Handle, + collection_lazy_info: PropertyWriterLazyCollectionInfo, + ) -> PropertyWriter + where + Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, + { + PropertyWriter { + collection, + collection_lazy_info, + _phantom: PhantomData, + } + } + + /// Load the [`PropertyWriterLazyCollectionInfo`] from the storage. + pub fn load_collection_info( + collection_handle: &Handle, + sender: &T::CrossAccountId, + ) -> PropertyWriterLazyCollectionInfo< + impl FnOnce() -> bool, + impl FnOnce() -> PropertiesPermissionMap, + > + where + Handle: Deref>, + { + let is_collection_admin = collection_handle.is_owner_or_admin(sender); + let property_permissions = >::property_permissions(collection_handle.id); + + PropertyWriterLazyCollectionInfo { + is_collection_admin: LazyValue::new(move || is_collection_admin), + property_permissions: LazyValue::new(move || property_permissions), + } + } + + /// Load the [`PropertyWriterLazyTokenInfo`] with token properties from the storage. 
+ pub fn load_token_properties( + collection: &Handle, + token_id: TokenId, + ) -> PropertyWriterLazyTokenInfo< + impl FnOnce() -> bool, + impl FnOnce() -> Result, + impl FnOnce() -> TokenProperties, + > + where + Handle: CommonCollectionOperations, + { + let stored_properties = collection + .get_token_properties_raw(token_id) + .unwrap_or_default(); + + PropertyWriterLazyTokenInfo { + is_token_exist: LazyValue::new(|| true), + is_token_owner: LazyValue::new(|| Ok(true)), + stored_properties: LazyValue::new(move || stored_properties), + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> + PropertyWriter<'a, BenchmarkPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> where T: Config, Handle: CommonCollectionOperations + Deref>, + FIsAdmin: FnOnce() -> bool, + FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { - PropertyWriter { - collection, - is_collection_admin: LazyValue::new(|| collection.is_owner_or_admin(sender)), - property_permissions: LazyValue::new(|| >::property_permissions(collection.id)), - check_token_exist: |token_id| collection.token_exists(token_id), - get_properties: |token_id| { - collection - .get_token_properties_raw(token_id) - .unwrap_or_default() - }, - _phantom: PhantomData, + /// A function to benchmark the writing of token properties. + pub fn write_token_properties( + &mut self, + token_id: TokenId, + properties_updates: impl Iterator, + log: evm_coder::ethereum::Log, + ) -> DispatchResult { + let check_token_exist = || true; + let check_token_owner = || Ok(true); + let get_token_properties = || TokenProperties::new(); + + self.internal_write_token_properties( + token_id, + PropertyWriterLazyTokenInfo::new( + check_token_exist, + check_token_owner, + get_token_properties, + ), + properties_updates.map(|p| (p.key, Some(p.value))), + log, + ) } } -/// Computes the weight delta for newly created tokens with properties. +/// Computes the weight of writing properties to tokens. /// * `properties_nums` - The properties num of each created token. -/// * `init_token_properties` - The function to obtain the weight from a token's properties num. -pub fn init_token_properties_delta Weight>( +/// * `per_token_weight_weight` - The function to obtain the weight +/// of writing properties from a token's properties num. +pub fn write_token_properties_total_weight Weight>( properties_nums: impl Iterator, - init_token_properties: I, + per_token_weight: I, ) -> Weight { - let mut delta = properties_nums + let mut weight = properties_nums .filter_map(|properties_num| { if properties_num > 0 { - Some(init_token_properties(properties_num)) + Some(per_token_weight(properties_num)) } else { None } }) .fold(Weight::zero(), |a, b| a.saturating_add(b)); - // If at least once the `init_token_properties` was called, - // it means at least one newly created token has properties. - // Becuase of that, some common collection data also was loaded and we need to add this weight. - // However, these common data was loaded only once which is guaranteed by the `PropertyWriter`. - if !delta.is_zero() { - delta = delta.saturating_add(>::init_token_properties_common()) + if !weight.is_zero() { + // If we are here, it means the token properties were written at least once. + // Because of that, some common collection data was also loaded; we must add this weight. + // However, this common data was loaded only once, which is guaranteed by the `PropertyWriter`. 
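// Illustrative note, not lines of this patch: with three minted tokens carrying
// 2, 0 and 5 properties, the `filter_map` above skips the empty token, the fold
// yields per_token_weight(2).saturating_add(per_token_weight(5)), and the line
// just below then adds `property_writer_load_collection_info()` exactly once,
// since the collection info is read a single time per call no matter how many
// tokens actually carried properties.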
+ + weight = weight.saturating_add(>::property_writer_load_collection_info()); } - delta + weight } #[cfg(any(feature = "tests", test))] diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 6dfaa8fd55..8e47211d74 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/common/src/weights.rs @@ -36,7 +36,7 @@ pub trait WeightInfo { fn set_collection_properties(b: u32, ) -> Weight; fn delete_collection_properties(b: u32, ) -> Weight; fn check_accesslist() -> Weight; - fn init_token_properties_common() -> Weight; + fn property_writer_load_collection_info() -> Weight; } /// Weights for pallet_common using the Substrate node and recommended hardware. @@ -49,10 +49,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_987_000 picoseconds. - Weight::from_parts(5_119_000, 44457) - // Standard Error: 7_609 - .saturating_add(Weight::from_parts(5_750_459, 0).saturating_mul(b.into())) + // Minimum execution time: 2_840_000 picoseconds. + Weight::from_parts(1_988_405, 44457) + // Standard Error: 7_834 + .saturating_add(Weight::from_parts(3_053_965, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -63,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 4_923_000 picoseconds. - Weight::from_parts(5_074_000, 44457) - // Standard Error: 36_651 - .saturating_add(Weight::from_parts(23_145_677, 0).saturating_mul(b.into())) + // Minimum execution time: 2_770_000 picoseconds. + Weight::from_parts(2_940_000, 44457) + // Standard Error: 30_686 + .saturating_add(Weight::from_parts(9_801_835, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -76,20 +76,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_271_000 picoseconds. - Weight::from_parts(4_461_000, 3535) + // Minimum execution time: 2_830_000 picoseconds. 
+ Weight::from_parts(2_950_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Common IsAdmin (r:1 w:0) /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - fn init_token_properties_common() -> Weight { + fn property_writer_load_collection_info() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 5_889_000 picoseconds. - Weight::from_parts(6_138_000, 20191) + // Minimum execution time: 3_970_000 picoseconds. + Weight::from_parts(4_140_000, 20191) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -103,10 +103,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_987_000 picoseconds. - Weight::from_parts(5_119_000, 44457) - // Standard Error: 7_609 - .saturating_add(Weight::from_parts(5_750_459, 0).saturating_mul(b.into())) + // Minimum execution time: 2_840_000 picoseconds. + Weight::from_parts(1_988_405, 44457) + // Standard Error: 7_834 + .saturating_add(Weight::from_parts(3_053_965, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -117,10 +117,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `303 + b * (33030 ±0)` // Estimated: `44457` - // Minimum execution time: 4_923_000 picoseconds. - Weight::from_parts(5_074_000, 44457) - // Standard Error: 36_651 - .saturating_add(Weight::from_parts(23_145_677, 0).saturating_mul(b.into())) + // Minimum execution time: 2_770_000 picoseconds. + Weight::from_parts(2_940_000, 44457) + // Standard Error: 30_686 + .saturating_add(Weight::from_parts(9_801_835, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -130,20 +130,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_271_000 picoseconds. - Weight::from_parts(4_461_000, 3535) + // Minimum execution time: 2_830_000 picoseconds. + Weight::from_parts(2_950_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Common IsAdmin (r:1 w:0) /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) /// Storage: Common CollectionPropertyPermissions (r:1 w:0) /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - fn init_token_properties_common() -> Weight { + fn property_writer_load_collection_info() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 5_889_000 picoseconds. - Weight::from_parts(6_138_000, 20191) + // Minimum execution time: 3_970_000 picoseconds. 
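Illustrative aside on the `GasCallsBudget` fix in the pallets/evm-coder-substrate hunk a little further below, not lines of this patch: the cost of consuming `calls` nested calls has to scale with the number of calls, so `consume_custom` switches from `overflowing_add` to `overflowing_mul` (3 calls at 10 gas each must cost 30 gas, not 13) and keeps bailing out when the multiplication overflows. A minimal stand-alone sketch with hypothetical numbers and a hypothetical `charge` helper:

// Overflow is treated as "cannot possibly afford it" instead of wrapping around.
fn charge(calls: u32, gas_per_call: u64, remaining_gas: u64) -> Option<u64> {
    let (cost, overflown) = (calls as u64).overflowing_mul(gas_per_call);
    if overflown {
        return None;
    }
    remaining_gas.checked_sub(cost)
}

fn main() {
    assert_eq!(charge(3, 10, 100), Some(70)); // 3 calls * 10 gas = 30 gas spent
    assert_eq!(charge(3, u64::MAX, 100), None); // the multiplication overflows
}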
+ Weight::from_parts(4_140_000, 20191) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/evm-coder-substrate/src/lib.rs b/pallets/evm-coder-substrate/src/lib.rs index dd61808b70..696589c935 100644 --- a/pallets/evm-coder-substrate/src/lib.rs +++ b/pallets/evm-coder-substrate/src/lib.rs @@ -84,7 +84,7 @@ pub struct GasCallsBudget<'r, T: Config> { } impl budget::Budget for GasCallsBudget<'_, T> { fn consume_custom(&self, calls: u32) -> bool { - let (gas, overflown) = (calls as u64).overflowing_add(self.gas_per_call); + let (gas, overflown) = (calls as u64).overflowing_mul(self.gas_per_call); if overflown { return false; } diff --git a/pallets/foreign-assets/src/impl_fungibles.rs b/pallets/foreign-assets/src/impl_fungibles.rs index 59934a4a87..23c167fe7e 100644 --- a/pallets/foreign-assets/src/impl_fungibles.rs +++ b/pallets/foreign-assets/src/impl_fungibles.rs @@ -23,7 +23,7 @@ use frame_system::Config as SystemConfig; use pallet_common::{CollectionHandle, CommonCollectionOperations}; use pallet_fungible::FungibleHandle; use sp_runtime::traits::{CheckedAdd, CheckedSub}; -use up_data_structs::budget::Value; +use up_data_structs::budget; use super::*; @@ -327,7 +327,7 @@ where &collection, &account, amount_data, - &Value::new(0), + &budget::Value::new(0), )?; Ok(amount) @@ -440,7 +440,7 @@ where &T::CrossAccountId::from_sub(source.clone()), &T::CrossAccountId::from_sub(dest.clone()), amount.into(), - &Value::new(0), + &budget::Value::new(0), ) .map_err(|e| e.error)?; diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index b6bc49c11e..c998d697d2 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -16,14 +16,11 @@ use core::marker::PhantomData; -use frame_support::{ - dispatch::DispatchResultWithPostInfo, ensure, fail, traits::Get, weights::Weight, -}; +use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight}; use pallet_common::{ weights::WeightInfo as _, with_weight, CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, }; -use pallet_structure::Error as StructureError; use sp_runtime::{ArithmeticError, DispatchError}; use sp_std::{vec, vec::Vec}; use up_data_structs::{ @@ -60,27 +57,19 @@ impl CommonWeightInfo for CommonWeights { >::set_collection_properties(amount) } - fn delete_collection_properties(amount: u32) -> Weight { - >::delete_collection_properties(amount) - } - fn set_token_properties(_amount: u32) -> Weight { // Error Weight::zero() } - fn delete_token_properties(_amount: u32) -> Weight { - // Error - Weight::zero() - } - fn set_token_property_permissions(_amount: u32) -> Weight { // Error Weight::zero() } fn transfer() -> Weight { - >::transfer_raw() + >::check_accesslist() * 2 + >::transfer_raw() + .saturating_add(>::check_accesslist().saturating_mul(2)) } fn approve() -> Weight { @@ -93,28 +82,14 @@ impl CommonWeightInfo for CommonWeights { fn transfer_from() -> Weight { Self::transfer() - + >::check_allowed_raw() - + >::set_allowance_unchecked_raw() + .saturating_add(>::check_allowed_raw()) + .saturating_add(>::set_allowance_unchecked_raw()) } fn burn_from() -> Weight { >::burn_from() } - fn burn_recursively_self_raw() -> Weight { - // Read to get total balance - Self::burn_item() + T::DbWeight::get().reads(1) - } - - fn burn_recursively_breadth_raw(_amount: u32) -> Weight { - // Fungible tokens can't have children - Weight::zero() - } - - fn token_owner() -> Weight { - Weight::zero() - } - fn 
set_allowance_for_all() -> Weight { Weight::zero() } @@ -203,26 +178,6 @@ impl CommonCollectionOperations for FungibleHandle { ) } - fn burn_item_recursively( - &self, - sender: T::CrossAccountId, - token: TokenId, - self_budget: &dyn Budget, - _breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo { - // Should not happen? - ensure!( - token == TokenId::default(), - >::FungibleItemsHaveNoId - ); - ensure!(self_budget.consume(), >::DepthLimit,); - - with_weight( - >::burn(self, &sender, >::get((self.id, &sender))), - >::burn_recursively_self_raw(), - ) - } - fn transfer( &self, from: T::CrossAccountId, diff --git a/pallets/fungible/src/erc.rs b/pallets/fungible/src/erc.rs index 5ef76c8a4f..9248fa3f7c 100644 --- a/pallets/fungible/src/erc.rs +++ b/pallets/fungible/src/erc.rs @@ -32,12 +32,12 @@ use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ call, dispatch_to_evm, execution::{PreDispatch, Result}, - frontier_contract, + frontier_contract, SubstrateRecorder, }; use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; use sp_core::{Get, U256}; use sp_std::vec::Vec; -use up_data_structs::CollectionMode; +use up_data_structs::{budget::Budget, CollectionMode}; use crate::{ common::CommonWeights, weights::WeightInfo, Allowance, Balance, Config, FungibleHandle, Pallet, @@ -73,6 +73,10 @@ pub struct AmountForAddress { amount: U256, } +fn nesting_budget(recorder: &SubstrateRecorder) -> impl Budget + '_ { + recorder.weight_calls_budget(>::find_parent()) +} + #[solidity_interface(name = ERC20, events(ERC20Events), enum(derive(PreDispatch)), enum_attr(weight), expect_selector = 0x942e8b22)] impl FungibleHandle { fn name(&self) -> Result { @@ -106,11 +110,8 @@ impl FungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, amount, &budget) + >::transfer(self, &caller, &to, amount, &nesting_budget(&self.recorder)) .map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } @@ -127,12 +128,16 @@ impl FungibleHandle { let from = T::CrossAccountId::from_eth(from); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, amount, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + >::transfer_from( + self, + &caller, + &from, + &to, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } #[weight(>::approve())] @@ -164,10 +169,8 @@ impl FungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::create_item(self, &caller, (to, amount), &budget) + + >::create_item(self, &caller, (to, amount), &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -201,10 +204,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::create_item(self, &caller, (to, amount), &budget) + + >::create_item(self, &caller, (to, 
amount), &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -236,12 +237,15 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = T::CrossAccountId::from_eth(from); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::burn_from(self, &caller, &from, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -260,12 +264,15 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = from.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::burn_from(self, &caller, &from, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -274,9 +281,6 @@ where #[weight(>::create_multiple_items_ex(amounts.len() as u32))] fn mint_bulk(&mut self, caller: Caller, amounts: Vec) -> Result { let caller = T::CrossAccountId::from_eth(caller); - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let amounts = amounts .into_iter() .map(|AmountForAddress { to, amount }| { @@ -287,7 +291,7 @@ where }) .collect::>()?; - >::create_multiple_items(self, &caller, amounts, &budget) + >::create_multiple_items(self, &caller, amounts, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -297,11 +301,9 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, amount, &budget).map_err(|_| "transfer error")?; + >::transfer(self, &caller, &to, amount, &nesting_budget(&self.recorder)) + .map_err(|_| "transfer error")?; Ok(true) } @@ -317,12 +319,16 @@ where let from = from.into_sub_cross_account::()?; let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, amount, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + >::transfer_from( + self, + &caller, + &from, + &to, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index fa145f53c7..ec02d14b0a 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -262,10 +262,83 @@ mod benchmarks { { >::burn_from(&collection, &burner, &sender, item, &Unlimited)?; } - - Ok(()) } + // set_token_properties { + // let b in 0..MAX_PROPERTIES_PER_ITEM; + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + // let perms = (0..b).map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }).collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; + // let props = (0..b).map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // 
}).collect::>(); + // let item = create_max_item(&collection, &owner, owner.clone())?; + // }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} + + // load_token_properties { + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let item = create_max_item(&collection, &owner, owner.clone())?; + // }: { + // pallet_common::BenchmarkPropertyWriter::::load_token_properties( + // &collection, + // item, + // ) + // } + + // write_token_properties { + // let b in 0..MAX_PROPERTIES_PER_ITEM; + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let perms = (0..b).map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }).collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; + // let props = (0..b).map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // }).collect::>(); + // let item = create_max_item(&collection, &owner, owner.clone())?; + + // let lazy_collection_info = pallet_common::BenchmarkPropertyWriter::::load_collection_info( + // &collection, + // &owner, + // ); + // }: { + // let mut property_writer = pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + // property_writer.write_token_properties( + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )? + // } + #[benchmark] fn set_token_property_permissions( b: Linear<0, MAX_PROPERTIES_PER_ITEM>, diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index 5bdf7f5606..7fe032d30e 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -18,8 +18,9 @@ use core::marker::PhantomData; use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight}; use pallet_common::{ - init_token_properties_delta, weights::WeightInfo as _, with_weight, CommonCollectionOperations, - CommonWeightInfo, RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, + weights::WeightInfo as _, with_weight, write_token_properties_total_weight, + CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, + SelfWeightOf as PalletCommonWeightOf, }; use pallet_structure::Pallet as PalletStructure; use sp_runtime::DispatchError; @@ -39,9 +40,9 @@ impl CommonWeightInfo for CommonWeights { fn create_multiple_items_ex(data: &CreateItemExData) -> Weight { match data { CreateItemExData::NFT(t) => >::create_multiple_items_ex(t.len() as u32) - .saturating_add(init_token_properties_delta::( + .saturating_add(write_token_properties_total_weight::( t.iter().map(|t| t.properties.len() as u32), - >::init_token_properties, + >::write_token_properties, )), _ => Weight::zero(), } @@ -49,12 +50,12 @@ impl CommonWeightInfo for CommonWeights { fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { >::create_multiple_items(data.len() as u32).saturating_add( - init_token_properties_delta::( + write_token_properties_total_weight::( data.iter().map(|t| match t { up_data_structs::CreateItemData::NFT(n) => n.properties.len() as u32, _ => 0, }), - >::init_token_properties, + >::write_token_properties, ), ) } @@ -67,16 +68,15 @@ impl CommonWeightInfo for CommonWeights { 
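The write_token_properties_total_weight helper used above replaces the old init_token_properties_delta; judging from the call sites, it folds a per-item property-write weight over the property counts of each item being created. A minimal sketch of that assumed shape (the real helper lives in pallet_common and its exact signature may differ):

use frame_support::weights::Weight;

/// Assumed shape only: sum a per-item weight over the number of properties
/// attached to each created item, using saturating arithmetic.
fn total_write_properties_weight(
    property_counts: impl Iterator<Item = u32>,
    per_item: impl Fn(u32) -> Weight,
) -> Weight {
    property_counts
        .map(per_item)
        .fold(Weight::zero(), |acc, w| acc.saturating_add(w))
}

Call sites such as create_multiple_items then pass the per-item property counts together with write_token_properties as the per-item function, as in the hunk above.
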
>::set_collection_properties(amount) } - fn delete_collection_properties(amount: u32) -> Weight { - >::delete_collection_properties(amount) - } - fn set_token_properties(amount: u32) -> Weight { - >::set_token_properties(amount) + write_token_properties_total_weight::([amount].into_iter(), |amount| { + >::load_token_properties() + .saturating_add(>::write_token_properties(amount)) + }) } fn delete_token_properties(amount: u32) -> Weight { - >::delete_token_properties(amount) + Self::set_token_properties(amount) } fn set_token_property_permissions(amount: u32) -> Weight { @@ -84,7 +84,8 @@ impl CommonWeightInfo for CommonWeights { } fn transfer() -> Weight { - >::transfer_raw() + >::check_accesslist() * 2 + >::transfer_raw() + .saturating_add(>::check_accesslist().saturating_mul(2)) } fn approve() -> Weight { @@ -96,26 +97,13 @@ impl CommonWeightInfo for CommonWeights { } fn transfer_from() -> Weight { - Self::transfer() + >::check_allowed_raw() + Self::transfer().saturating_add(>::check_allowed_raw()) } fn burn_from() -> Weight { >::burn_from() } - fn burn_recursively_self_raw() -> Weight { - >::burn_recursively_self_raw() - } - - fn burn_recursively_breadth_raw(amount: u32) -> Weight { - >::burn_recursively_breadth_plus_self_plus_self_per_each_raw(amount) - .saturating_sub(Self::burn_recursively_self_raw().saturating_mul(amount as u64 + 1)) - } - - fn token_owner() -> Weight { - >::token_owner() - } - fn set_allowance_for_all() -> Weight { >::set_allowance_for_all() } @@ -308,16 +296,6 @@ impl CommonCollectionOperations for NonfungibleHandle { } } - fn burn_item_recursively( - &self, - sender: T::CrossAccountId, - token: TokenId, - self_budget: &dyn Budget, - breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo { - >::burn_recursively(self, &sender, token, self_budget, breadth_budget) - } - fn transfer( &self, from: T::CrossAccountId, diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index 0dddd97bfd..3ad3e393aa 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -38,14 +38,14 @@ use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ call, dispatch_to_evm, execution::{Error, PreDispatch, Result}, - frontier_contract, + frontier_contract, SubstrateRecorder, }; use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; use sp_core::{Get, U256}; use sp_std::{vec, vec::Vec}; use up_data_structs::{ - CollectionId, CollectionPropertiesVec, Property, PropertyKey, PropertyKeyPermission, - PropertyPermission, TokenId, + budget::Budget, CollectionId, CollectionPropertiesVec, Property, PropertyKey, + PropertyKeyPermission, PropertyPermission, TokenId, }; use crate::{ @@ -78,6 +78,10 @@ frontier_contract! { impl Contract for NonfungibleHandle {...} } +fn nesting_budget(recorder: &SubstrateRecorder) -> impl Budget + '_ { + recorder.weight_calls_budget(>::find_parent()) +} + /// @title A contract that allows to set and delete token properties and change token property permissions. #[solidity_interface(name = TokenProperties, events(ERC721TokenEvent), enum(derive(PreDispatch)), enum_attr(weight))] impl NonfungibleHandle { @@ -146,7 +150,7 @@ impl NonfungibleHandle { /// @param key Property key. /// @param value Property value. 
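Several weight helpers above switch from the plain + and * operators to explicit saturating_add/saturating_mul, so an overflow clamps at the maximum weight instead of wrapping or panicking. An illustrative composition in the same style, where the two arguments stand in for the pallet's own weight functions:

use frame_support::weights::Weight;

/// Illustrative only: a transfer weight built from a raw transfer cost plus
/// two access-list checks, composed with saturating arithmetic.
fn composed_transfer_weight(transfer_raw: Weight, check_accesslist: Weight) -> Weight {
    transfer_raw.saturating_add(check_accesslist.saturating_mul(2))
}
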
#[solidity(hide)] - #[weight(>::set_token_properties(1))] + #[weight(>::set_token_properties(1))] fn set_property( &mut self, caller: Caller, @@ -161,16 +165,12 @@ impl NonfungibleHandle { .map_err(|_| "key too long")?; let value = value.0.try_into().map_err(|_| "value too long")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::set_token_property( self, &caller, TokenId(token_id), Property { key, value }, - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -179,7 +179,7 @@ impl NonfungibleHandle { /// @dev Throws error if `msg.sender` has no permission to edit the property. /// @param tokenId ID of the token. /// @param properties settable properties - #[weight(>::set_token_properties(properties.len() as u32))] + #[weight(>::set_token_properties(properties.len() as u32))] fn set_properties( &mut self, caller: Caller, @@ -189,10 +189,6 @@ impl NonfungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let token_id: u32 = token_id.try_into().map_err(|_| "token id overflow")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - let properties = properties .into_iter() .map(eth::Property::try_into) @@ -203,7 +199,7 @@ impl NonfungibleHandle { &caller, TokenId(token_id), properties.into_iter(), - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -213,7 +209,7 @@ impl NonfungibleHandle { /// @param tokenId ID of the token. /// @param key Property key. #[solidity(hide)] - #[weight(>::delete_token_properties(1))] + #[weight(>::delete_token_properties(1))] fn delete_property(&mut self, token_id: U256, caller: Caller, key: String) -> Result<()> { let caller = T::CrossAccountId::from_eth(caller); let token_id: u32 = token_id.try_into().map_err(|_| "token id overflow")?; @@ -221,19 +217,21 @@ impl NonfungibleHandle { .try_into() .map_err(|_| "key too long")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - - >::delete_token_property(self, &caller, TokenId(token_id), key, &nesting_budget) - .map_err(dispatch_to_evm::) + >::delete_token_property( + self, + &caller, + TokenId(token_id), + key, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::) } /// @notice Delete token properties value. /// @dev Throws error if `msg.sender` has no permission to edit the property. /// @param tokenId ID of the token. /// @param keys Properties key. 
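The repeated self.recorder.weight_calls_budget(...find_parent()) pattern in these EVM handlers is factored into the small nesting_budget helper introduced above. The generic parameters are elided in the rendering above, so the following restores them as an assumption of the helper's full form:

use pallet_evm_coder_substrate::SubstrateRecorder;
use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight};
use up_data_structs::budget::Budget;
// Assumption: `Config` is the trait of the pallet this helper lives in.
use crate::Config;

// Assumed full form of the helper shown above: builds a call budget for
// nesting traversal, charged against the EVM gas recorder.
fn nesting_budget<T: Config>(recorder: &SubstrateRecorder<T>) -> impl Budget + '_ {
    recorder.weight_calls_budget(<StructureWeight<T>>::find_parent())
}
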
- #[weight(>::delete_token_properties(keys.len() as u32))] + #[weight(>::delete_token_properties(keys.len() as u32))] fn delete_properties( &mut self, token_id: U256, @@ -247,16 +245,12 @@ impl NonfungibleHandle { .map(|k| Ok(>::from(k).try_into().map_err(|_| "key too long")?)) .collect::>>()?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::delete_token_properties( self, &caller, TokenId(token_id), keys.into_iter(), - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -481,12 +475,16 @@ impl NonfungibleHandle { let from = T::CrossAccountId::from_eth(from); let to = T::CrossAccountId::from_eth(to); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, token, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + >::transfer_from( + self, + &caller, + &from, + &to, + token, + &nesting_budget(&self.recorder), + ) + .map_err(|e| dispatch_to_evm::(e.error))?; Ok(()) } @@ -594,9 +592,6 @@ impl NonfungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token_id: u32 = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); if >::get(self.id) .checked_add(1) @@ -613,7 +608,7 @@ impl NonfungibleHandle { properties: BoundedVec::default(), owner: to, }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; @@ -664,9 +659,6 @@ impl NonfungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token_id: u32 = token_id.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); if >::get(self.id) .checked_add(1) @@ -694,7 +686,7 @@ impl NonfungibleHandle { properties, owner: to, }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; Ok(true) @@ -840,11 +832,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, token, &budget) + >::transfer(self, &caller, &to, token, &nesting_budget(&self.recorder)) .map_err(|e| dispatch_to_evm::(e.error))?; Ok(()) } @@ -864,11 +853,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, token, &budget) + >::transfer(self, &caller, &to, token, &nesting_budget(&self.recorder)) .map_err(|e| dispatch_to_evm::(e.error))?; Ok(()) } @@ -891,11 +877,16 @@ where let from = from.into_sub_cross_account::()?; let to = to.into_sub_cross_account::()?; let token_id = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - Pallet::::transfer_from(self, &caller, &from, &to, token_id, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + + Pallet::::transfer_from( + self, + &caller, + &from, + &to, + token_id, + &nesting_budget(&self.recorder), + ) + .map_err(|e| dispatch_to_evm::(e.error))?; Ok(()) } @@ -911,11 +902,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = T::CrossAccountId::from_eth(from); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - 
>::burn_from(self, &caller, &from, token, &budget) + >::burn_from(self, &caller, &from, token, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(()) } @@ -936,11 +924,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = from.into_sub_cross_account::()?; let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::burn_from(self, &caller, &from, token, &budget) + >::burn_from(self, &caller, &from, token, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(()) } @@ -966,9 +951,6 @@ where let mut expected_index = >::get(self.id) .checked_add(1) .ok_or("item id overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let total_tokens = token_ids.len(); for id in token_ids.into_iter() { @@ -985,7 +967,7 @@ where }) .collect(); - >::create_multiple_items(self, &caller, data, &budget) + >::create_multiple_items(self, &caller, data, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -995,9 +977,6 @@ where #[weight(>::create_multiple_items(data.len() as u32) + >::set_token_properties(data.len() as u32))] fn mint_bulk_cross(&mut self, caller: Caller, data: Vec) -> Result { let caller = T::CrossAccountId::from_eth(caller); - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let mut create_nft_data = Vec::with_capacity(data.len()); for MintTokenData { owner, properties } in data { @@ -1013,8 +992,13 @@ where }); } - >::create_multiple_items(self, &caller, create_nft_data, &budget) - .map_err(dispatch_to_evm::)?; + >::create_multiple_items( + self, + &caller, + create_nft_data, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -1037,9 +1021,6 @@ where let mut expected_index = >::get(self.id) .checked_add(1) .ok_or("item id overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let mut data = Vec::with_capacity(tokens.len()); for TokenUri { id, uri } in tokens { @@ -1066,7 +1047,7 @@ where }); } - >::create_multiple_items(self, &caller, data, &budget) + >::create_multiple_items(self, &caller, data, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -1097,10 +1078,6 @@ where let caller = T::CrossAccountId::from_eth(caller); - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::create_item( self, &caller, @@ -1108,7 +1085,7 @@ where properties, owner: to, }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index 948e31061b..c309f294ac 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -109,7 +109,7 @@ use pallet_common::{ }; use pallet_evm::{account::CrossAccountId, Pallet as PalletEvm}; use pallet_evm_coder_substrate::{SubstrateRecorder, WithRecorder}; -use pallet_structure::{Error as StructureError, Pallet as PalletStructure}; +use pallet_structure::Pallet as PalletStructure; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::{Get, H160}; @@ -503,51 +503,6 @@ impl Pallet { Ok(()) } - /// Same as [`burn`] but burns all the tokens that are nested in the token first - /// - /// - `self_budget`: Limit for searching children in depth. - /// - `breadth_budget`: Limit of breadth of searching children. 
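The removed burn_recursively relied on two Budget values: self_budget bounds how deep the traversal may go and breadth_budget bounds how many children may be visited, each consume() call spending one unit. A tiny sketch of how such a budget gates work, using up_data_structs::budget::Value as seen elsewhere in this patch (illustrative only, assuming Value allows a fixed number of consumptions):

use up_data_structs::budget::{Budget, Value};

// Returns an error once the depth budget is exhausted.
fn descend(depth_budget: &dyn Budget) -> Result<(), &'static str> {
    if !depth_budget.consume() {
        return Err("depth limit reached");
    }
    Ok(())
}

fn demo() {
    // The fungible adapters above pass a zero budget, which forbids any descent.
    let no_nesting = Value::new(0);
    assert!(descend(&no_nesting).is_err());
}
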
- /// - /// [`burn`]: struct.Pallet.html#method.burn - #[transactional] - pub fn burn_recursively( - collection: &NonfungibleHandle, - sender: &T::CrossAccountId, - token: TokenId, - self_budget: &dyn Budget, - breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo { - ensure!(self_budget.consume(), >::DepthLimit,); - - let current_token_account = - T::CrossTokenAddressMapping::token_to_address(collection.id, token); - - let mut weight = Weight::zero(); - - // This method is transactional, if user in fact doesn't have permissions to remove token - - // tokens removed here will be restored after rejected transaction - for ((collection, token), _) in >::iter_prefix((collection.id, token)) { - ensure!(breadth_budget.consume(), >::BreadthLimit,); - let PostDispatchInfo { actual_weight, .. } = - >::burn_item_recursively( - current_token_account.clone(), - collection, - token, - self_budget, - breadth_budget, - )?; - if let Some(actual_weight) = actual_weight { - weight = weight.saturating_add(actual_weight); - } - } - - Self::burn(collection, sender, token)?; - DispatchResultWithPostInfo::Ok(PostDispatchInfo { - actual_weight: Some(weight + >::burn_item()), - pays_fee: Pays::Yes, - }) - } - /// A batch operation to add, edit or remove properties for a token. /// /// - `nesting_budget`: Limit for searching parents in-depth to check ownership. @@ -568,7 +523,7 @@ impl Pallet { nesting_budget: &dyn Budget, ) -> DispatchResult { let mut property_writer = - pallet_common::property_writer_for_existing_token(collection, sender); + pallet_common::ExistingTokenPropertyWriter::new(collection, sender); property_writer.write_token_properties( sender, @@ -915,7 +870,7 @@ impl Pallet { // ========= - let mut property_writer = pallet_common::property_writer_for_new_token(collection, sender); + let mut property_writer = pallet_common::NewTokenPropertyWriter::new(collection, sender); with_transaction(|| { for (i, data) in data.iter().enumerate() { diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index 723099a4d8..377e283cd2 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/nonfungible/src/weights.rs @@ -46,7 +46,8 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; - fn init_token_properties(b: u32, ) -> Weight; + fn load_token_properties() -> Weight; + fn write_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn token_owner() -> Weight; fn set_allowance_for_all() -> Weight; @@ -69,8 +70,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 9_726_000 picoseconds. - Weight::from_parts(10_059_000, 3530) + // Minimum execution time: 4_990_000 picoseconds. + Weight::from_parts(5_170_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -87,10 +88,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_270_000 picoseconds. - Weight::from_parts(3_693_659, 3530) - // Standard Error: 255 - .saturating_add(Weight::from_parts(3_024_284, 0).saturating_mul(b.into())) + // Minimum execution time: 1_680_000 picoseconds. + Weight::from_parts(1_720_000, 3530) + // Standard Error: 674 + .saturating_add(Weight::from_parts(2_406_591, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -108,10 +109,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_188_000 picoseconds. - Weight::from_parts(3_307_000, 3481) - // Standard Error: 567 - .saturating_add(Weight::from_parts(4_320_449, 0).saturating_mul(b.into())) + // Minimum execution time: 1_680_000 picoseconds. + Weight::from_parts(1_720_000, 3481) + // Standard Error: 1_729 + .saturating_add(Weight::from_parts(3_418_983, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -136,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 18_062_000 picoseconds. - Weight::from_parts(18_433_000, 3530) + // Minimum execution time: 10_700_000 picoseconds. + Weight::from_parts(11_180_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 22_942_000 picoseconds. - Weight::from_parts(23_527_000, 3530) + // Minimum execution time: 13_650_000 picoseconds. + Weight::from_parts(13_910_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,10 +186,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 22_709_000 picoseconds. 
- Weight::from_parts(23_287_000, 5874) - // Standard Error: 89_471 - .saturating_add(Weight::from_parts(63_285_201, 0).saturating_mul(b.into())) + // Minimum execution time: 13_500_000 picoseconds. + Weight::from_parts(13_830_000, 5874) + // Standard Error: 136_447 + .saturating_add(Weight::from_parts(43_149_279, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -207,8 +208,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 13_652_000 picoseconds. - Weight::from_parts(13_981_000, 6070) + // Minimum execution time: 8_440_000 picoseconds. + Weight::from_parts(8_680_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -220,8 +221,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 7_837_000 picoseconds. - Weight::from_parts(8_113_000, 3522) + // Minimum execution time: 4_580_000 picoseconds. + Weight::from_parts(4_850_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -233,8 +234,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 7_769_000 picoseconds. - Weight::from_parts(7_979_000, 3522) + // Minimum execution time: 4_650_000 picoseconds. + Weight::from_parts(4_890_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -244,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_194_000 picoseconds. - Weight::from_parts(4_353_000, 3522) + // Minimum execution time: 2_630_000 picoseconds. + Weight::from_parts(2_760_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -266,8 +267,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 21_978_000 picoseconds. - Weight::from_parts(22_519_000, 3530) + // Minimum execution time: 13_300_000 picoseconds. + Weight::from_parts(13_650_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -278,10 +279,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_457_000 picoseconds. - Weight::from_parts(1_563_000, 20191) - // Standard Error: 14_041 - .saturating_add(Weight::from_parts(8_452_415, 0).saturating_mul(b.into())) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(600_000, 20191) + // Standard Error: 23_117 + .saturating_add(Weight::from_parts(6_048_092, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,24 +297,34 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 963_000 picoseconds. 
- Weight::from_parts(1_126_511, 36269) - // Standard Error: 9_175 - .saturating_add(Weight::from_parts(5_096_011, 0).saturating_mul(b.into())) + // Minimum execution time: 340_000 picoseconds. + Weight::from_parts(7_359_078, 36269) + // Standard Error: 9_052 + .saturating_add(Weight::from_parts(2_763_267, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: Nonfungible TokenProperties (r:1 w:0) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + fn load_token_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `36269` + // Minimum execution time: 1_610_000 picoseconds. + Weight::from_parts(1_690_000, 36269) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } /// Storage: Nonfungible TokenProperties (r:0 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn init_token_properties(b: u32, ) -> Weight { + fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 194_000 picoseconds. - Weight::from_parts(222_000, 0) - // Standard Error: 7_295 - .saturating_add(Weight::from_parts(4_499_463, 0).saturating_mul(b.into())) + // Minimum execution time: 70_000 picoseconds. + Weight::from_parts(3_262_181, 0) + // Standard Error: 5_240 + .saturating_add(Weight::from_parts(2_426_582, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -327,10 +338,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 992_000 picoseconds. - Weight::from_parts(1_043_000, 36269) - // Standard Error: 37_370 - .saturating_add(Weight::from_parts(23_672_870, 0).saturating_mul(b.into())) + // Minimum execution time: 350_000 picoseconds. + Weight::from_parts(370_000, 36269) + // Standard Error: 29_081 + .saturating_add(Weight::from_parts(9_667_268, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -340,8 +351,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 3_743_000 picoseconds. - Weight::from_parts(3_908_000, 3522) + // Minimum execution time: 2_380_000 picoseconds. + Weight::from_parts(2_500_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -350,8 +361,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_106_000 picoseconds. - Weight::from_parts(4_293_000, 0) + // Minimum execution time: 2_060_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -360,8 +371,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(2_923_000, 3576) + // Minimum execution time: 1_630_000 picoseconds. 
+ Weight::from_parts(1_730_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -370,8 +381,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_033_000 picoseconds. - Weight::from_parts(3_174_000, 36269) + // Minimum execution time: 1_700_000 picoseconds. + Weight::from_parts(1_780_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -391,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 9_726_000 picoseconds. - Weight::from_parts(10_059_000, 3530) + // Minimum execution time: 4_990_000 picoseconds. + Weight::from_parts(5_170_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -409,10 +420,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_270_000 picoseconds. - Weight::from_parts(3_693_659, 3530) - // Standard Error: 255 - .saturating_add(Weight::from_parts(3_024_284, 0).saturating_mul(b.into())) + // Minimum execution time: 1_680_000 picoseconds. + Weight::from_parts(1_720_000, 3530) + // Standard Error: 674 + .saturating_add(Weight::from_parts(2_406_591, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -430,10 +441,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_188_000 picoseconds. - Weight::from_parts(3_307_000, 3481) - // Standard Error: 567 - .saturating_add(Weight::from_parts(4_320_449, 0).saturating_mul(b.into())) + // Minimum execution time: 1_680_000 picoseconds. + Weight::from_parts(1_720_000, 3481) + // Standard Error: 1_729 + .saturating_add(Weight::from_parts(3_418_983, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -458,8 +469,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 18_062_000 picoseconds. - Weight::from_parts(18_433_000, 3530) + // Minimum execution time: 10_700_000 picoseconds. + Weight::from_parts(11_180_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -481,8 +492,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 22_942_000 picoseconds. - Weight::from_parts(23_527_000, 3530) + // Minimum execution time: 13_650_000 picoseconds. + Weight::from_parts(13_910_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -507,10 +518,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1500 + b * (58 ±0)` // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 22_709_000 picoseconds. 
- Weight::from_parts(23_287_000, 5874) - // Standard Error: 89_471 - .saturating_add(Weight::from_parts(63_285_201, 0).saturating_mul(b.into())) + // Minimum execution time: 13_500_000 picoseconds. + Weight::from_parts(13_830_000, 5874) + // Standard Error: 136_447 + .saturating_add(Weight::from_parts(43_149_279, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -529,8 +540,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 13_652_000 picoseconds. - Weight::from_parts(13_981_000, 6070) + // Minimum execution time: 8_440_000 picoseconds. + Weight::from_parts(8_680_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -542,8 +553,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 7_837_000 picoseconds. - Weight::from_parts(8_113_000, 3522) + // Minimum execution time: 4_580_000 picoseconds. + Weight::from_parts(4_850_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -555,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 7_769_000 picoseconds. - Weight::from_parts(7_979_000, 3522) + // Minimum execution time: 4_650_000 picoseconds. + Weight::from_parts(4_890_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -566,8 +577,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_194_000 picoseconds. - Weight::from_parts(4_353_000, 3522) + // Minimum execution time: 2_630_000 picoseconds. + Weight::from_parts(2_760_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible Allowance (r:1 w:1) @@ -588,8 +599,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 21_978_000 picoseconds. - Weight::from_parts(22_519_000, 3530) + // Minimum execution time: 13_300_000 picoseconds. + Weight::from_parts(13_650_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -600,10 +611,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_457_000 picoseconds. - Weight::from_parts(1_563_000, 20191) - // Standard Error: 14_041 - .saturating_add(Weight::from_parts(8_452_415, 0).saturating_mul(b.into())) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(600_000, 20191) + // Standard Error: 23_117 + .saturating_add(Weight::from_parts(6_048_092, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -618,24 +629,34 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `640 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 963_000 picoseconds. - Weight::from_parts(1_126_511, 36269) - // Standard Error: 9_175 - .saturating_add(Weight::from_parts(5_096_011, 0).saturating_mul(b.into())) + // Minimum execution time: 340_000 picoseconds. 
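As in the other generated weight files, the second block (impl WeightInfo for ()) is the parameterless fallback, commonly used in tests; it prices storage I/O with the constant RocksDbWeight rather than the runtime-configured T::DbWeight. A minimal sketch of that fallback shape, again with placeholder constants:

use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

/// Illustrative only: a fallback weight entry priced with the constant RocksDB
/// read weights instead of the runtime-configured `T::DbWeight`.
fn example_fallback_weight() -> Weight {
    Weight::from_parts(2_000_000, 3_500)
        .saturating_add(RocksDbWeight::get().reads(1_u64))
}
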
+ Weight::from_parts(7_359_078, 36269) + // Standard Error: 9_052 + .saturating_add(Weight::from_parts(2_763_267, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: Nonfungible TokenProperties (r:1 w:0) + /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + fn load_token_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `36269` + // Minimum execution time: 1_610_000 picoseconds. + Weight::from_parts(1_690_000, 36269) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } /// Storage: Nonfungible TokenProperties (r:0 w:1) /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn init_token_properties(b: u32, ) -> Weight { + fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 194_000 picoseconds. - Weight::from_parts(222_000, 0) - // Standard Error: 7_295 - .saturating_add(Weight::from_parts(4_499_463, 0).saturating_mul(b.into())) + // Minimum execution time: 70_000 picoseconds. + Weight::from_parts(3_262_181, 0) + // Standard Error: 5_240 + .saturating_add(Weight::from_parts(2_426_582, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -649,10 +670,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 992_000 picoseconds. - Weight::from_parts(1_043_000, 36269) - // Standard Error: 37_370 - .saturating_add(Weight::from_parts(23_672_870, 0).saturating_mul(b.into())) + // Minimum execution time: 350_000 picoseconds. + Weight::from_parts(370_000, 36269) + // Standard Error: 29_081 + .saturating_add(Weight::from_parts(9_667_268, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -662,8 +683,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 3_743_000 picoseconds. - Weight::from_parts(3_908_000, 3522) + // Minimum execution time: 2_380_000 picoseconds. + Weight::from_parts(2_500_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:0 w:1) @@ -672,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_106_000 picoseconds. - Weight::from_parts(4_293_000, 0) + // Minimum execution time: 2_060_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Nonfungible CollectionAllowance (r:1 w:0) @@ -682,8 +703,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(2_923_000, 3576) + // Minimum execution time: 1_630_000 picoseconds. + Weight::from_parts(1_730_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Nonfungible TokenProperties (r:1 w:1) @@ -692,8 +713,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_033_000 picoseconds. 
- Weight::from_parts(3_174_000, 36269) + // Minimum execution time: 1_700_000 picoseconds. + Weight::from_parts(1_780_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 9d3657b16e..4008f668be 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -19,10 +19,7 @@ use core::{convert::TryInto, iter::IntoIterator}; use frame_benchmarking::v2::*; use pallet_common::{ bench_init, - benchmarking::{ - create_collection_raw, /*load_is_admin_and_property_permissions,*/ property_key, - property_value, - }, + benchmarking::{create_collection_raw, property_key, property_value}, }; use sp_std::prelude::*; use up_data_structs::{ @@ -424,6 +421,81 @@ mod benchmarks { Ok(()) } + // set_token_properties { + // let b in 0..MAX_PROPERTIES_PER_ITEM; + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + // let perms = (0..b).map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }).collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; + // let props = (0..b).map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // }).collect::>(); + // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + // }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} + + // load_token_properties { + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + // }: { + // pallet_common::BenchmarkPropertyWriter::::load_token_properties( + // &collection, + // item, + // ) + // } + + // write_token_properties { + // let b in 0..MAX_PROPERTIES_PER_ITEM; + // bench_init!{ + // owner: sub; collection: collection(owner); + // owner: cross_from_sub; + // }; + + // let perms = (0..b).map(|k| PropertyKeyPermission { + // key: property_key(k as usize), + // permission: PropertyPermission { + // mutable: false, + // collection_admin: true, + // token_owner: true, + // }, + // }).collect::>(); + // >::set_token_property_permissions(&collection, &owner, perms)?; + // let props = (0..b).map(|k| Property { + // key: property_key(k as usize), + // value: property_value(), + // }).collect::>(); + // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + + // let lazy_collection_info = pallet_common::BenchmarkPropertyWriter::::load_collection_info( + // &collection, + // &owner, + // ); + // }: { + // let mut property_writer = pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + // property_writer.write_token_properties( + // item, + // props.into_iter(), + // crate::erc::ERC721TokenEvent::TokenChanged { + // token_id: item.into(), + // } + // .to_log(T::ContractAddress::get()), + // )? 
+ // } + #[benchmark] fn set_token_property_permissions( b: Linear<0, MAX_PROPERTIES_PER_ITEM>, diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 74aa322bb9..bae6dc94a8 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -16,14 +16,12 @@ use core::marker::PhantomData; -use frame_support::{ - dispatch::DispatchResultWithPostInfo, ensure, fail, traits::Get, weights::Weight, -}; +use frame_support::{dispatch::DispatchResultWithPostInfo, fail, weights::Weight}; use pallet_common::{ - init_token_properties_delta, weights::WeightInfo as _, with_weight, CommonCollectionOperations, - CommonWeightInfo, RefungibleExtensions, + weights::WeightInfo as _, with_weight, write_token_properties_total_weight, + CommonCollectionOperations, CommonWeightInfo, RefungibleExtensions, }; -use pallet_structure::{Error as StructureError, Pallet as PalletStructure}; +use pallet_structure::Pallet as PalletStructure; use sp_runtime::DispatchError; use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; use up_data_structs::{ @@ -50,14 +48,14 @@ pub struct CommonWeights(PhantomData); impl CommonWeightInfo for CommonWeights { fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { >::create_multiple_items(data.len() as u32).saturating_add( - init_token_properties_delta::( + write_token_properties_total_weight::( data.iter().map(|data| match data { up_data_structs::CreateItemData::ReFungible(rft_data) => { rft_data.properties.len() as u32 } _ => 0, }), - >::init_token_properties, + >::write_token_properties, ), ) } @@ -66,16 +64,16 @@ impl CommonWeightInfo for CommonWeights { match call { CreateItemExData::RefungibleMultipleOwners(i) => { >::create_multiple_items_ex_multiple_owners(i.users.len() as u32) - .saturating_add(init_token_properties_delta::( + .saturating_add(write_token_properties_total_weight::( [i.properties.len() as u32].into_iter(), - >::init_token_properties, + >::write_token_properties, )) } CreateItemExData::RefungibleMultipleItems(i) => { >::create_multiple_items_ex_multiple_items(i.len() as u32) - .saturating_add(init_token_properties_delta::( + .saturating_add(write_token_properties_total_weight::( i.iter().map(|d| d.properties.len() as u32), - >::init_token_properties, + >::write_token_properties, )) } _ => Weight::zero(), @@ -90,16 +88,11 @@ impl CommonWeightInfo for CommonWeights { >::set_collection_properties(amount) } - fn delete_collection_properties(amount: u32) -> Weight { - >::delete_collection_properties(amount) - } - fn set_token_properties(amount: u32) -> Weight { - >::set_token_properties(amount) - } - - fn delete_token_properties(amount: u32) -> Weight { - >::delete_token_properties(amount) + write_token_properties_total_weight::([amount].into_iter(), |amount| { + >::load_token_properties() + + >::write_token_properties(amount) + }) } fn set_token_property_permissions(amount: u32) -> Weight { @@ -136,19 +129,6 @@ impl CommonWeightInfo for CommonWeights { >::burn_from() } - fn burn_recursively_self_raw() -> Weight { - // Read to get total balance - Self::burn_item() + T::DbWeight::get().reads(1) - } - fn burn_recursively_breadth_raw(_amount: u32) -> Weight { - // Refungible token can't have children - Weight::zero() - } - - fn token_owner() -> Weight { - >::token_owner() - } - fn set_allowance_for_all() -> Weight { >::set_allowance_for_all() } @@ -265,25 +245,6 @@ impl CommonCollectionOperations for RefungibleHandle { ) } - fn burn_item_recursively( - &self, - sender: T::CrossAccountId, 
- token: TokenId, - self_budget: &dyn Budget, - _breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo { - ensure!(self_budget.consume(), >::DepthLimit,); - with_weight( - >::burn( - self, - &sender, - token, - >::get((self.id, token, &sender)), - ), - >::burn_recursively_self_raw(), - ) - } - fn transfer( &self, from: T::CrossAccountId, diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 89d0d87c27..4515a45920 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -32,26 +32,26 @@ use frame_support::{BoundedBTreeMap, BoundedVec}; use pallet_common::{ erc::{static_property::key, CollectionCall, CommonEvmHandler}, eth::{self, TokenUri}, - CollectionHandle, CollectionPropertyPermissions, CommonCollectionOperations, + CollectionHandle, CollectionPropertyPermissions, CommonCollectionOperations, CommonWeightInfo, Error as CommonError, }; use pallet_evm::{account::CrossAccountId, PrecompileHandle}; use pallet_evm_coder_substrate::{ call, dispatch_to_evm, execution::{Error, PreDispatch, Result}, - frontier_contract, + frontier_contract, SubstrateRecorder, }; use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; use sp_core::{Get, H160, U256}; use sp_std::{collections::btree_map::BTreeMap, vec, vec::Vec}; use up_data_structs::{ - mapping::TokenAddressMapping, CollectionId, CollectionPropertiesVec, Property, PropertyKey, - PropertyKeyPermission, PropertyPermission, TokenId, TokenOwnerError, + budget::Budget, mapping::TokenAddressMapping, CollectionId, CollectionPropertiesVec, Property, + PropertyKey, PropertyKeyPermission, PropertyPermission, TokenId, TokenOwnerError, }; use crate::{ - weights::WeightInfo, AccountBalance, Balance, Config, CreateItemData, Pallet, RefungibleHandle, - SelfWeightOf, TokenProperties, TokensMinted, TotalSupply, + common::CommonWeights, weights::WeightInfo, AccountBalance, Balance, Config, CreateItemData, + Pallet, RefungibleHandle, SelfWeightOf, TokenProperties, TokensMinted, TotalSupply, }; frontier_contract! { @@ -90,6 +90,10 @@ pub struct MintTokenData { pub properties: Vec, } +pub fn nesting_budget(recorder: &SubstrateRecorder) -> impl Budget + '_ { + recorder.weight_calls_budget(>::find_parent()) +} + /// @title A contract that allows to set and delete token properties and change token property permissions. #[solidity_interface(name = TokenProperties, events(ERC721TokenEvent), enum(derive(PreDispatch)), enum_attr(weight))] impl RefungibleHandle { @@ -158,7 +162,7 @@ impl RefungibleHandle { /// @param key Property key. /// @param value Property value. #[solidity(hide)] - #[weight(>::set_token_properties(1))] + #[weight(>::set_token_properties(1))] fn set_property( &mut self, caller: Caller, @@ -173,16 +177,12 @@ impl RefungibleHandle { .map_err(|_| "key too long")?; let value = value.0.try_into().map_err(|_| "value too long")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::set_token_property( self, &caller, TokenId(token_id), Property { key, value }, - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -191,7 +191,7 @@ impl RefungibleHandle { /// @dev Throws error if `msg.sender` has no permission to edit the property. /// @param tokenId ID of the token. 
/// @param properties settable properties - #[weight(>::set_token_properties(properties.len() as u32))] + #[weight(>::set_token_properties(properties.len() as u32))] fn set_properties( &mut self, caller: Caller, @@ -201,10 +201,6 @@ impl RefungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let token_id: u32 = token_id.try_into().map_err(|_| "token id overflow")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - let properties = properties .into_iter() .map(eth::Property::try_into) @@ -215,7 +211,7 @@ impl RefungibleHandle { &caller, TokenId(token_id), properties.into_iter(), - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -225,7 +221,7 @@ impl RefungibleHandle { /// @param tokenId ID of the token. /// @param key Property key. #[solidity(hide)] - #[weight(>::delete_token_properties(1))] + #[weight(>::delete_token_properties(1))] fn delete_property(&mut self, token_id: U256, caller: Caller, key: String) -> Result<()> { let caller = T::CrossAccountId::from_eth(caller); let token_id: u32 = token_id.try_into().map_err(|_| "token id overflow")?; @@ -233,19 +229,21 @@ impl RefungibleHandle { .try_into() .map_err(|_| "key too long")?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - - >::delete_token_property(self, &caller, TokenId(token_id), key, &nesting_budget) - .map_err(dispatch_to_evm::) + >::delete_token_property( + self, + &caller, + TokenId(token_id), + key, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::) } /// @notice Delete token properties value. /// @dev Throws error if `msg.sender` has no permission to edit the property. /// @param tokenId ID of the token. /// @param keys Properties key. - #[weight(>::delete_token_properties(keys.len() as u32))] + #[weight(>::delete_token_properties(keys.len() as u32))] fn delete_properties( &mut self, token_id: U256, @@ -259,16 +257,12 @@ impl RefungibleHandle { .map(|k| Ok(>::from(k).try_into().map_err(|_| "key too long")?)) .collect::>>()?; - let nesting_budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::delete_token_properties( self, &caller, TokenId(token_id), keys.into_iter(), - &nesting_budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::) } @@ -497,15 +491,20 @@ impl RefungibleHandle { let from = T::CrossAccountId::from_eth(from); let to = T::CrossAccountId::from_eth(to); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token, &from)?; ensure_single_owner(self, token, balance)?; - >::transfer_from(self, &caller, &from, &to, token, balance, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer_from( + self, + &caller, + &from, + &to, + token, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -629,9 +628,6 @@ impl RefungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token_id: u32 = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); if >::get(self.id) .checked_add(1) @@ -653,7 +649,7 @@ impl RefungibleHandle { users, properties: CollectionPropertiesVec::default(), }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; @@ -704,9 +700,6 @@ impl RefungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token_id: u32 = 
token_id.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); if >::get(self.id) .checked_add(1) @@ -736,7 +729,7 @@ impl RefungibleHandle { self, &caller, CreateItemData:: { users, properties }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; Ok(true) @@ -865,15 +858,19 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token, &caller)?; ensure_single_owner(self, token, balance)?; - >::transfer(self, &caller, &to, token, balance, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer( + self, + &caller, + &to, + token, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -893,15 +890,19 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token, &caller)?; ensure_single_owner(self, token, balance)?; - >::transfer(self, &caller, &to, token, balance, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer( + self, + &caller, + &to, + token, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -923,15 +924,20 @@ where let from = from.into_sub_cross_account::()?; let to = to.into_sub_cross_account::()?; let token_id = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token_id, &from)?; ensure_single_owner(self, token_id, balance)?; - Pallet::::transfer_from(self, &caller, &from, &to, token_id, balance, &budget) - .map_err(dispatch_to_evm::)?; + Pallet::::transfer_from( + self, + &caller, + &from, + &to, + token_id, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -948,15 +954,19 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = T::CrossAccountId::from_eth(from); let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token, &from)?; ensure_single_owner(self, token, balance)?; - >::burn_from(self, &caller, &from, token, balance, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + token, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -977,15 +987,19 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = from.into_sub_cross_account::()?; let token = token_id.try_into()?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let balance = balance(self, token, &from)?; ensure_single_owner(self, token, balance)?; - >::burn_from(self, &caller, &from, token, balance, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + token, + balance, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(()) } @@ -1010,9 +1024,6 @@ where let mut expected_index = >::get(self.id) .checked_add(1) .ok_or("item id overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let total_tokens = token_ids.len(); for id in token_ids.into_iter() { @@ -1035,7 +1046,7 @@ where .map(|_| create_item_data.clone()) .collect(); - >::create_multiple_items(self, &caller, data, &budget) + 
>::create_multiple_items(self, &caller, data, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -1053,9 +1064,6 @@ where token_properties: Vec, ) -> Result { let caller = T::CrossAccountId::from_eth(caller); - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let has_multiple_tokens = token_properties.len() > 1; let mut create_rft_data = Vec::with_capacity(token_properties.len()); @@ -1084,8 +1092,13 @@ where }); } - >::create_multiple_items(self, &caller, create_rft_data, &budget) - .map_err(dispatch_to_evm::)?; + >::create_multiple_items( + self, + &caller, + create_rft_data, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -1108,9 +1121,6 @@ where let mut expected_index = >::get(self.id) .checked_add(1) .ok_or("item id overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); let mut data = Vec::with_capacity(tokens.len()); let users: BoundedBTreeMap<_, _, _> = [(to, 1)] @@ -1143,7 +1153,7 @@ where data.push(create_item_data); } - >::create_multiple_items(self, &caller, data, &budget) + >::create_multiple_items(self, &caller, data, &nesting_budget(&self.recorder)) .map_err(dispatch_to_evm::)?; Ok(true) } @@ -1174,10 +1184,6 @@ where let caller = T::CrossAccountId::from_eth(caller); - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - let users = [(to, 1)] .into_iter() .collect::>() @@ -1187,7 +1193,7 @@ where self, &caller, CreateItemData:: { users, properties }, - &budget, + &nesting_budget(&self.recorder), ) .map_err(dispatch_to_evm::)?; diff --git a/pallets/refungible/src/erc_token.rs b/pallets/refungible/src/erc_token.rs index 7269552ea3..182eb31935 100644 --- a/pallets/refungible/src/erc_token.rs +++ b/pallets/refungible/src/erc_token.rs @@ -37,14 +37,13 @@ use pallet_evm_coder_substrate::{ execution::{PreDispatch, Result}, frontier_contract, WithRecorder, }; -use pallet_structure::{weights::WeightInfo as _, SelfWeightOf as StructureWeight}; use sp_core::U256; use sp_std::vec::Vec; use up_data_structs::TokenId; use crate::{ - common::CommonWeights, weights::WeightInfo, Allowance, Balance, Config, Pallet, - RefungibleHandle, SelfWeightOf, TotalSupply, + common::CommonWeights, erc::nesting_budget, weights::WeightInfo, Allowance, Balance, Config, + Pallet, RefungibleHandle, SelfWeightOf, TotalSupply, }; /// Refungible token handle contains information about token's collection and id @@ -140,12 +139,16 @@ impl RefungibleTokenHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer( + self, + &caller, + &to, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -165,12 +168,17 @@ impl RefungibleTokenHandle { let from = T::CrossAccountId::from_eth(from); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer_from( + self, + &caller, + &from, + &to, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -231,12 +239,16 @@ where let 
caller = T::CrossAccountId::from_eth(caller); let from = T::CrossAccountId::from_eth(from); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::burn_from(self, &caller, &from, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -254,12 +266,16 @@ where let caller = T::CrossAccountId::from_eth(caller); let from = from.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::burn_from(self, &caller, &from, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::burn_from( + self, + &caller, + &from, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -315,12 +331,16 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer( + self, + &caller, + &to, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } @@ -340,12 +360,17 @@ where let from = from.into_sub_cross_account::()?; let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder - .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, self.1, amount, &budget) - .map_err(dispatch_to_evm::)?; + >::transfer_from( + self, + &caller, + &from, + &to, + self.1, + amount, + &nesting_budget(&self.recorder), + ) + .map_err(dispatch_to_evm::)?; Ok(true) } } diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index ea03543ab8..8017c6f6f7 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -507,7 +507,7 @@ impl Pallet { nesting_budget: &dyn Budget, ) -> DispatchResult { let mut property_writer = - pallet_common::property_writer_for_existing_token(collection, sender); + pallet_common::ExistingTokenPropertyWriter::new(collection, sender); property_writer.write_token_properties( sender, @@ -858,7 +858,7 @@ impl Pallet { // ========= - let mut property_writer = pallet_common::property_writer_for_new_token(collection, sender); + let mut property_writer = pallet_common::NewTokenPropertyWriter::new(collection, sender); with_transaction(|| { for (i, data) in data.iter().enumerate() { diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index 9c1c09667a..e58b965648 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,9 +3,9 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-30, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` +//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/refungible/src/weights.rs @@ -52,7 +52,8 @@ pub trait WeightInfo { fn burn_from() -> Weight; fn set_token_property_permissions(b: u32, ) -> Weight; fn set_token_properties(b: u32, ) -> Weight; - fn init_token_properties(b: u32, ) -> Weight; + fn load_token_properties() -> Weight; + fn write_token_properties(b: u32, ) -> Weight; fn delete_token_properties(b: u32, ) -> Weight; fn repartition_item() -> Weight; fn token_owner() -> Weight; @@ -78,8 +79,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 11_341_000 picoseconds. - Weight::from_parts(11_741_000, 3530) + // Minimum execution time: 5_710_000 picoseconds. + Weight::from_parts(5_980_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -98,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 2_665_000 picoseconds. - Weight::from_parts(2_791_000, 3530) - // Standard Error: 996 - .saturating_add(Weight::from_parts(4_343_736, 0).saturating_mul(b.into())) + // Minimum execution time: 1_300_000 picoseconds. + Weight::from_parts(1_360_000, 3530) + // Standard Error: 2_783 + .saturating_add(Weight::from_parts(3_456_531, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -121,10 +122,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_616_000 picoseconds. - Weight::from_parts(2_726_000, 3481) - // Standard Error: 665 - .saturating_add(Weight::from_parts(5_554_066, 0).saturating_mul(b.into())) + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_370_000, 3481) + // Standard Error: 3_198 + .saturating_add(Weight::from_parts(4_435_305, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -146,10 +147,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_697_000 picoseconds. - Weight::from_parts(2_136_481, 3481) - // Standard Error: 567 - .saturating_add(Weight::from_parts(4_390_621, 0).saturating_mul(b.into())) + // Minimum execution time: 1_730_000 picoseconds. + Weight::from_parts(1_810_000, 3481) + // Standard Error: 1_923 + .saturating_add(Weight::from_parts(3_500_817, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -168,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 22_859_000 picoseconds. - Weight::from_parts(23_295_000, 8682) + // Minimum execution time: 14_010_000 picoseconds. 
+ Weight::from_parts(16_300_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -189,8 +190,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 21_477_000 picoseconds. - Weight::from_parts(22_037_000, 3554) + // Minimum execution time: 13_700_000 picoseconds. + Weight::from_parts(14_180_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -202,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 13_714_000 picoseconds. - Weight::from_parts(14_050_000, 6118) + // Minimum execution time: 8_990_000 picoseconds. + Weight::from_parts(9_400_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 15_879_000 picoseconds. - Weight::from_parts(16_266_000, 6118) + // Minimum execution time: 10_240_000 picoseconds. + Weight::from_parts(10_610_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -236,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 18_186_000 picoseconds. - Weight::from_parts(18_682_000, 6118) + // Minimum execution time: 12_040_000 picoseconds. + Weight::from_parts(12_390_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,8 +254,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 17_943_000 picoseconds. - Weight::from_parts(18_333_000, 6118) + // Minimum execution time: 11_940_000 picoseconds. + Weight::from_parts(12_240_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -266,8 +267,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 8_391_000 picoseconds. - Weight::from_parts(8_637_000, 3554) + // Minimum execution time: 5_150_000 picoseconds. + Weight::from_parts(5_440_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 8_519_000 picoseconds. - Weight::from_parts(8_760_000, 3554) + // Minimum execution time: 5_170_000 picoseconds. + Weight::from_parts(5_400_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +295,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 19_554_000 picoseconds. - Weight::from_parts(20_031_000, 6118) + // Minimum execution time: 13_150_000 picoseconds. 
+ Weight::from_parts(13_600_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -313,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 21_338_000 picoseconds. - Weight::from_parts(21_803_000, 6118) + // Minimum execution time: 14_280_000 picoseconds. + Weight::from_parts(14_680_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -332,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 24_179_000 picoseconds. - Weight::from_parts(24_647_000, 6118) + // Minimum execution time: 16_110_000 picoseconds. + Weight::from_parts(16_710_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -351,8 +352,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 24_008_000 picoseconds. - Weight::from_parts(24_545_000, 6118) + // Minimum execution time: 16_130_000 picoseconds. + Weight::from_parts(16_680_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -374,8 +375,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 27_907_000 picoseconds. - Weight::from_parts(28_489_000, 3570) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(18_870_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -386,10 +387,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_460_000 picoseconds. - Weight::from_parts(1_564_000, 20191) - // Standard Error: 14_117 - .saturating_add(Weight::from_parts(8_196_214, 0).saturating_mul(b.into())) + // Minimum execution time: 580_000 picoseconds. + Weight::from_parts(660_000, 20191) + // Standard Error: 29_964 + .saturating_add(Weight::from_parts(6_251_766, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -404,24 +405,34 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 1_012_000 picoseconds. - Weight::from_parts(1_081_000, 36269) - // Standard Error: 6_838 - .saturating_add(Weight::from_parts(5_801_181, 0).saturating_mul(b.into())) + // Minimum execution time: 350_000 picoseconds. + Weight::from_parts(2_269_806, 36269) + // Standard Error: 7_751 + .saturating_add(Weight::from_parts(3_068_126, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: Refungible TokenProperties (r:1 w:0) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + fn load_token_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `120` + // Estimated: `36269` + // Minimum execution time: 1_010_000 picoseconds. 
+ Weight::from_parts(1_080_000, 36269) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } /// Storage: Refungible TokenProperties (r:0 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn init_token_properties(b: u32, ) -> Weight { + fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(253_000, 0) - // Standard Error: 100_218 - .saturating_add(Weight::from_parts(12_632_221, 0).saturating_mul(b.into())) + // Minimum execution time: 70_000 picoseconds. + Weight::from_parts(1_363_449, 0) + // Standard Error: 8_964 + .saturating_add(Weight::from_parts(2_665_759, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -435,10 +446,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 1_014_000 picoseconds. - Weight::from_parts(1_065_000, 36269) - // Standard Error: 39_536 - .saturating_add(Weight::from_parts(24_125_838, 0).saturating_mul(b.into())) + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(370_000, 36269) + // Standard Error: 28_541 + .saturating_add(Weight::from_parts(9_863_065, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -450,8 +461,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 10_315_000 picoseconds. - Weight::from_parts(10_601_000, 3554) + // Minimum execution time: 6_320_000 picoseconds. + Weight::from_parts(6_640_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -461,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 4_898_000 picoseconds. - Weight::from_parts(5_136_000, 6118) + // Minimum execution time: 2_520_000 picoseconds. + Weight::from_parts(2_680_000, 6118) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -471,8 +482,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_146_000 picoseconds. - Weight::from_parts(4_337_000, 0) + // Minimum execution time: 2_070_000 picoseconds. + Weight::from_parts(2_230_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -481,8 +492,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_170_000 picoseconds. - Weight::from_parts(2_301_000, 3576) + // Minimum execution time: 1_270_000 picoseconds. + Weight::from_parts(1_420_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -491,8 +502,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_098_000 picoseconds. - Weight::from_parts(2_251_000, 36269) + // Minimum execution time: 1_010_000 picoseconds. 
+ Weight::from_parts(1_160_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -514,8 +525,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 11_341_000 picoseconds. - Weight::from_parts(11_741_000, 3530) + // Minimum execution time: 5_710_000 picoseconds. + Weight::from_parts(5_980_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -534,10 +545,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 2_665_000 picoseconds. - Weight::from_parts(2_791_000, 3530) - // Standard Error: 996 - .saturating_add(Weight::from_parts(4_343_736, 0).saturating_mul(b.into())) + // Minimum execution time: 1_300_000 picoseconds. + Weight::from_parts(1_360_000, 3530) + // Standard Error: 2_783 + .saturating_add(Weight::from_parts(3_456_531, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -557,10 +568,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 2_616_000 picoseconds. - Weight::from_parts(2_726_000, 3481) - // Standard Error: 665 - .saturating_add(Weight::from_parts(5_554_066, 0).saturating_mul(b.into())) + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_370_000, 3481) + // Standard Error: 3_198 + .saturating_add(Weight::from_parts(4_435_305, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -582,10 +593,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_697_000 picoseconds. - Weight::from_parts(2_136_481, 3481) - // Standard Error: 567 - .saturating_add(Weight::from_parts(4_390_621, 0).saturating_mul(b.into())) + // Minimum execution time: 1_730_000 picoseconds. + Weight::from_parts(1_810_000, 3481) + // Standard Error: 1_923 + .saturating_add(Weight::from_parts(3_500_817, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -604,8 +615,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 22_859_000 picoseconds. - Weight::from_parts(23_295_000, 8682) + // Minimum execution time: 14_010_000 picoseconds. + Weight::from_parts(16_300_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -625,8 +636,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 21_477_000 picoseconds. - Weight::from_parts(22_037_000, 3554) + // Minimum execution time: 13_700_000 picoseconds. 
+ Weight::from_parts(14_180_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -638,8 +649,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 13_714_000 picoseconds. - Weight::from_parts(14_050_000, 6118) + // Minimum execution time: 8_990_000 picoseconds. + Weight::from_parts(9_400_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -655,8 +666,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 15_879_000 picoseconds. - Weight::from_parts(16_266_000, 6118) + // Minimum execution time: 10_240_000 picoseconds. + Weight::from_parts(10_610_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -672,8 +683,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 18_186_000 picoseconds. - Weight::from_parts(18_682_000, 6118) + // Minimum execution time: 12_040_000 picoseconds. + Weight::from_parts(12_390_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -689,8 +700,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 17_943_000 picoseconds. - Weight::from_parts(18_333_000, 6118) + // Minimum execution time: 11_940_000 picoseconds. + Weight::from_parts(12_240_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -702,8 +713,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 8_391_000 picoseconds. - Weight::from_parts(8_637_000, 3554) + // Minimum execution time: 5_150_000 picoseconds. + Weight::from_parts(5_440_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -715,8 +726,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 8_519_000 picoseconds. - Weight::from_parts(8_760_000, 3554) + // Minimum execution time: 5_170_000 picoseconds. + Weight::from_parts(5_400_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -730,8 +741,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 19_554_000 picoseconds. - Weight::from_parts(20_031_000, 6118) + // Minimum execution time: 13_150_000 picoseconds. + Weight::from_parts(13_600_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -749,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 21_338_000 picoseconds. - Weight::from_parts(21_803_000, 6118) + // Minimum execution time: 14_280_000 picoseconds. + Weight::from_parts(14_680_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -768,8 +779,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 24_179_000 picoseconds. 
- Weight::from_parts(24_647_000, 6118) + // Minimum execution time: 16_110_000 picoseconds. + Weight::from_parts(16_710_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -787,8 +798,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 24_008_000 picoseconds. - Weight::from_parts(24_545_000, 6118) + // Minimum execution time: 16_130_000 picoseconds. + Weight::from_parts(16_680_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -810,8 +821,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 27_907_000 picoseconds. - Weight::from_parts(28_489_000, 3570) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(18_870_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -822,10 +833,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_460_000 picoseconds. - Weight::from_parts(1_564_000, 20191) - // Standard Error: 14_117 - .saturating_add(Weight::from_parts(8_196_214, 0).saturating_mul(b.into())) + // Minimum execution time: 580_000 picoseconds. + Weight::from_parts(660_000, 20191) + // Standard Error: 29_964 + .saturating_add(Weight::from_parts(6_251_766, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -840,24 +851,34 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `502 + b * (261 ±0)` // Estimated: `36269` - // Minimum execution time: 1_012_000 picoseconds. - Weight::from_parts(1_081_000, 36269) - // Standard Error: 6_838 - .saturating_add(Weight::from_parts(5_801_181, 0).saturating_mul(b.into())) + // Minimum execution time: 350_000 picoseconds. + Weight::from_parts(2_269_806, 36269) + // Standard Error: 7_751 + .saturating_add(Weight::from_parts(3_068_126, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: Refungible TokenProperties (r:1 w:0) + /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + fn load_token_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `120` + // Estimated: `36269` + // Minimum execution time: 1_010_000 picoseconds. + Weight::from_parts(1_080_000, 36269) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } /// Storage: Refungible TokenProperties (r:0 w:1) /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) /// The range of component `b` is `[0, 64]`. - fn init_token_properties(b: u32, ) -> Weight { + fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(253_000, 0) - // Standard Error: 100_218 - .saturating_add(Weight::from_parts(12_632_221, 0).saturating_mul(b.into())) + // Minimum execution time: 70_000 picoseconds. 
+ Weight::from_parts(1_363_449, 0) + // Standard Error: 8_964 + .saturating_add(Weight::from_parts(2_665_759, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Common CollectionPropertyPermissions (r:1 w:0) @@ -871,10 +892,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561 + b * (33291 ±0)` // Estimated: `36269` - // Minimum execution time: 1_014_000 picoseconds. - Weight::from_parts(1_065_000, 36269) - // Standard Error: 39_536 - .saturating_add(Weight::from_parts(24_125_838, 0).saturating_mul(b.into())) + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(370_000, 36269) + // Standard Error: 28_541 + .saturating_add(Weight::from_parts(9_863_065, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -886,8 +907,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 10_315_000 picoseconds. - Weight::from_parts(10_601_000, 3554) + // Minimum execution time: 6_320_000 picoseconds. + Weight::from_parts(6_640_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -897,8 +918,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `6118` - // Minimum execution time: 4_898_000 picoseconds. - Weight::from_parts(5_136_000, 6118) + // Minimum execution time: 2_520_000 picoseconds. + Weight::from_parts(2_680_000, 6118) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: Refungible CollectionAllowance (r:0 w:1) @@ -907,8 +928,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_146_000 picoseconds. - Weight::from_parts(4_337_000, 0) + // Minimum execution time: 2_070_000 picoseconds. + Weight::from_parts(2_230_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: Refungible CollectionAllowance (r:1 w:0) @@ -917,8 +938,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 2_170_000 picoseconds. - Weight::from_parts(2_301_000, 3576) + // Minimum execution time: 1_270_000 picoseconds. + Weight::from_parts(1_420_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: Refungible TokenProperties (r:1 w:1) @@ -927,8 +948,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_098_000 picoseconds. - Weight::from_parts(2_251_000, 36269) + // Minimum execution time: 1_010_000 picoseconds. 
+ Weight::from_parts(1_160_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/structure/src/lib.rs b/pallets/structure/src/lib.rs index 2014cc44b7..127983607d 100644 --- a/pallets/structure/src/lib.rs +++ b/pallets/structure/src/lib.rs @@ -53,11 +53,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{ - dispatch::{DispatchResult, DispatchResultWithPostInfo}, - fail, - pallet_prelude::*, -}; +use frame_support::{dispatch::DispatchResult, fail, pallet_prelude::*}; use pallet_common::{ dispatch::CollectionDispatch, erc::CrossAccountId, eth::is_collection, CommonCollectionOperations, @@ -269,22 +265,6 @@ impl Pallet { Err(>::DepthLimit.into()) } - /// Burn token and all of it's nested tokens - /// - /// - `self_budget`: Limit for searching children in depth. - /// - `breadth_budget`: Limit of breadth of searching children. - pub fn burn_item_recursively( - from: T::CrossAccountId, - collection: CollectionId, - token: TokenId, - self_budget: &dyn Budget, - breadth_budget: &dyn Budget, - ) -> DispatchResultWithPostInfo { - let dispatch = T::CollectionDispatch::dispatch(collection)?; - let dispatch = dispatch.as_dyn(); - dispatch.burn_item_recursively(from, token, self_budget, breadth_budget) - } - /// Check if `token` indirectly owned by `user` /// /// Returns `true` if `user` is `token`'s owner. Or If token is provided as `user` then diff --git a/pallets/unique/Cargo.toml b/pallets/unique/Cargo.toml index 9af8e3653a..d6e9265f85 100644 --- a/pallets/unique/Cargo.toml +++ b/pallets/unique/Cargo.toml @@ -31,7 +31,9 @@ std = [ 'parity-scale-codec/std', 'sp-runtime/std', 'sp-std/std', + 'up-common/std', 'up-data-structs/std', + 'pallet-structure/std', ] stubgen = ["evm-coder/stubgen", "pallet-common/stubgen"] try-runtime = ["frame-support/try-runtime"] @@ -53,9 +55,11 @@ pallet-evm = { workspace = true } pallet-evm-coder-substrate = { workspace = true } pallet-nonfungible = { workspace = true } pallet-refungible = { workspace = true } +pallet-structure = { workspace = true } scale-info = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +up-common = { workspace = true } up-data-structs = { workspace = true } diff --git a/pallets/unique/src/lib.rs b/pallets/unique/src/lib.rs index 1c098beb18..00f3fea64e 100644 --- a/pallets/unique/src/lib.rs +++ b/pallets/unique/src/lib.rs @@ -84,13 +84,20 @@ pub mod weights; #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResult, ensure, fail, storage::Key, BoundedVec}; + use frame_support::{ + dispatch::{DispatchErrorWithPostInfo, DispatchResult, PostDispatchInfo}, + ensure, fail, + storage::Key, + BoundedVec, + }; use frame_system::{ensure_root, ensure_signed}; use pallet_common::{ dispatch::{dispatch_tx, CollectionDispatch}, - CollectionHandle, CommonWeightInfo, Pallet as PalletCommon, RefungibleExtensionsWeightInfo, + CollectionHandle, CommonCollectionOperations, CommonWeightInfo, Pallet as PalletCommon, + RefungibleExtensionsWeightInfo, }; use pallet_evm::account::CrossAccountId; + use pallet_structure::weights::WeightInfo as StructureWeightInfo; use scale_info::TypeInfo; use sp_std::{vec, vec::Vec}; use up_data_structs::{ @@ -105,9 +112,6 @@ pub mod pallet { use super::*; - /// A maximum number of levels of depth in the token nesting tree. - pub const NESTING_BUDGET: u32 = 5; - /// Errors for the common Unique transactions. 
#[pallet::error] pub enum Error { @@ -128,6 +132,8 @@ pub mod pallet { /// Weight information for common pallet operations. type CommonWeightInfo: CommonWeightInfo; + type StructureWeightInfo: StructureWeightInfo; + /// Weight info information for extra refungible pallet operations. type RefungibleExtensionsWeightInfo: RefungibleExtensionsWeightInfo; } @@ -264,7 +270,7 @@ pub mod pallet { impl Pallet { /// A maximum number of levels of depth in the token nesting tree. fn nesting_budget() -> u32 { - NESTING_BUDGET + 5 } /// Maximal length of a collection name. @@ -666,7 +672,7 @@ pub mod pallet { /// * `owner`: Address of the initial owner of the item. /// * `data`: Token data describing the item to store on chain. #[pallet::call_index(11)] - #[pallet::weight(T::CommonWeightInfo::create_item(data))] + #[pallet::weight(T::CommonWeightInfo::create_item(data) + >::nesting_budget_predispatch_weight())] pub fn create_item( origin: OriginFor, collection_id: CollectionId, @@ -674,9 +680,9 @@ pub mod pallet { data: CreateItemData, ) -> DispatchResultWithPostInfo { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.create_item(sender, owner, data, &budget) }) } @@ -700,7 +706,7 @@ pub mod pallet { /// * `owner`: Address of the initial owner of the tokens. /// * `items_data`: Vector of data describing each item to be created. #[pallet::call_index(12)] - #[pallet::weight(T::CommonWeightInfo::create_multiple_items(items_data))] + #[pallet::weight(T::CommonWeightInfo::create_multiple_items(items_data) + >::nesting_budget_predispatch_weight())] pub fn create_multiple_items( origin: OriginFor, collection_id: CollectionId, @@ -709,9 +715,9 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { ensure!(!items_data.is_empty(), Error::::EmptyArgument); let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.create_multiple_items(sender, owner, items_data, &budget) }) } @@ -791,7 +797,7 @@ pub mod pallet { /// * `properties`: Vector of key-value pairs stored as the token's metadata. /// Keys support Latin letters, `-`, `_`, and `.` as symbols. #[pallet::call_index(15)] - #[pallet::weight(T::CommonWeightInfo::set_token_properties(properties.len() as u32))] + #[pallet::weight(T::CommonWeightInfo::set_token_properties(properties.len() as u32) + >::nesting_budget_predispatch_weight())] pub fn set_token_properties( origin: OriginFor, collection_id: CollectionId, @@ -801,9 +807,9 @@ pub mod pallet { ensure!(!properties.is_empty(), Error::::EmptyArgument); let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.set_token_properties(sender, token_id, properties, &budget) }) } @@ -824,7 +830,7 @@ pub mod pallet { /// * `property_keys`: Vector of keys of the properties to be deleted. /// Keys support Latin letters, `-`, `_`, and `.` as symbols. 
#[pallet::call_index(16)] - #[pallet::weight(T::CommonWeightInfo::delete_token_properties(property_keys.len() as u32))] + #[pallet::weight(T::CommonWeightInfo::delete_token_properties(property_keys.len() as u32) + >::nesting_budget_predispatch_weight())] pub fn delete_token_properties( origin: OriginFor, collection_id: CollectionId, @@ -834,9 +840,9 @@ pub mod pallet { ensure!(!property_keys.is_empty(), Error::::EmptyArgument); let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.delete_token_properties(sender, token_id, property_keys, &budget) }) } @@ -888,16 +894,16 @@ pub mod pallet { /// * `collection_id`: ID of the collection to which the tokens would belong. /// * `data`: Explicit item creation data. #[pallet::call_index(18)] - #[pallet::weight(T::CommonWeightInfo::create_multiple_items_ex(data))] + #[pallet::weight(T::CommonWeightInfo::create_multiple_items_ex(data) + >::nesting_budget_predispatch_weight())] pub fn create_multiple_items_ex( origin: OriginFor, collection_id: CollectionId, data: CreateItemExData, ) -> DispatchResultWithPostInfo { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.create_multiple_items_ex(sender, data, &budget) }) } @@ -995,7 +1001,7 @@ pub mod pallet { /// * Fungible Mode: The desired number of pieces to burn. /// * Re-Fungible Mode: The desired number of pieces to burn. #[pallet::call_index(21)] - #[pallet::weight(T::CommonWeightInfo::burn_from())] + #[pallet::weight(T::CommonWeightInfo::burn_from() + >::nesting_budget_predispatch_weight())] pub fn burn_from( origin: OriginFor, collection_id: CollectionId, @@ -1004,9 +1010,9 @@ pub mod pallet { value: u128, ) -> DispatchResultWithPostInfo { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.burn_from(sender, from, item_id, value, &budget) }) } @@ -1033,7 +1039,7 @@ pub mod pallet { /// * Fungible Mode: The desired number of pieces to transfer. /// * Re-Fungible Mode: The desired number of pieces to transfer. #[pallet::call_index(22)] - #[pallet::weight(T::CommonWeightInfo::transfer())] + #[pallet::weight(T::CommonWeightInfo::transfer() + >::nesting_budget_predispatch_weight())] pub fn transfer( origin: OriginFor, recipient: T::CrossAccountId, @@ -1042,9 +1048,9 @@ pub mod pallet { value: u128, ) -> DispatchResultWithPostInfo { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.transfer(sender, recipient, item_id, value, &budget) }) } @@ -1138,7 +1144,7 @@ pub mod pallet { /// * Fungible Mode: The desired number of pieces to transfer. /// * Re-Fungible Mode: The desired number of pieces to transfer. 
#[pallet::call_index(25)] - #[pallet::weight(T::CommonWeightInfo::transfer_from())] + #[pallet::weight(T::CommonWeightInfo::transfer_from() + >::nesting_budget_predispatch_weight())] pub fn transfer_from( origin: OriginFor, from: T::CrossAccountId, @@ -1148,9 +1154,9 @@ pub mod pallet { value: u128, ) -> DispatchResultWithPostInfo { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); - let budget = budget::Value::new(NESTING_BUDGET); + let budget = Self::structure_nesting_budget(); - dispatch_tx::(collection_id, |d| { + Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { d.transfer_from(sender, from, recipient, item_id, value, &budget) }) } @@ -1348,5 +1354,44 @@ pub mod pallet { Ok(()) } + + fn structure_nesting_budget() -> budget::Value { + budget::Value::new(Self::nesting_budget()) + } + + fn nesting_budget_weight(value: &budget::Value) -> Weight { + T::StructureWeightInfo::find_parent().saturating_mul(value.remaining() as u64) + } + + fn nesting_budget_predispatch_weight() -> Weight { + Self::nesting_budget_weight(&Self::structure_nesting_budget()) + } + + pub fn dispatch_tx_with_nesting_budget< + C: FnOnce(&dyn CommonCollectionOperations) -> DispatchResultWithPostInfo, + >( + collection: CollectionId, + budget: &budget::Value, + call: C, + ) -> DispatchResultWithPostInfo { + let mut result = dispatch_tx::(collection, call); + + match &mut result { + Ok(PostDispatchInfo { + actual_weight: Some(weight), + .. + }) + | Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(weight), + .. + }, + .. + }) => *weight += Self::nesting_budget_weight(budget), + _ => {} + } + + result + } } } diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index 11ed1903ed..ced66a2f6c 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -45,6 +45,7 @@ pub const UNIQUE: Balance = 100 * CENTIUNIQUE; /// Minimum balance required to create or keep an account open. pub const EXISTENTIAL_DEPOSIT: u128 = 0; + /// Amount of Balance reserved for candidate registration. pub const GENESIS_LICENSE_BOND: u128 = 1_000_000_000_000 * UNIQUE; /// Amount of maximum collators for Collator Selection. diff --git a/primitives/data-structs/src/budget.rs b/primitives/data-structs/src/budget.rs index ecc4d539a3..c71c79808f 100644 --- a/primitives/data-structs/src/budget.rs +++ b/primitives/data-structs/src/budget.rs @@ -1,4 +1,4 @@ -use core::cell::Cell; +use sp_std::cell::Cell; pub trait Budget { /// Returns true while not exceeded @@ -22,7 +22,7 @@ impl Value { pub fn new(v: u32) -> Self { Self(Cell::new(v)) } - pub fn refund(self) -> u32 { + pub fn remaining(&self) -> u32 { self.0.get() } } diff --git a/runtime/common/config/pallets/mod.rs b/runtime/common/config/pallets/mod.rs index 1d7abdc768..ee0694582c 100644 --- a/runtime/common/config/pallets/mod.rs +++ b/runtime/common/config/pallets/mod.rs @@ -116,6 +116,7 @@ impl pallet_inflation::Config for Runtime { impl pallet_unique::Config for Runtime { type WeightInfo = pallet_unique::weights::SubstrateWeight; type CommonWeightInfo = CommonWeights; + type StructureWeightInfo = pallet_structure::weights::SubstrateWeight; type RefungibleExtensionsWeightInfo = CommonWeights; } diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 228df8cd3f..04910ae2dc 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -84,7 +84,7 @@ macro_rules! 
impl_common_runtime_apis { } fn topmost_token_owner(collection: CollectionId, token: TokenId) -> Result, DispatchError> { - let budget = up_data_structs::budget::Value::new(10); + let budget = budget::Value::new(10); >::find_topmost_owner(collection, token, &budget) } diff --git a/runtime/common/weights/mod.rs b/runtime/common/weights/mod.rs index 224695b17b..03b35ce510 100644 --- a/runtime/common/weights/mod.rs +++ b/runtime/common/weights/mod.rs @@ -98,10 +98,6 @@ where dispatch_weight::() + max_weight_of!(set_token_properties(amount)) } - fn delete_token_properties(amount: u32) -> Weight { - dispatch_weight::() + max_weight_of!(delete_token_properties(amount)) - } - fn set_token_property_permissions(amount: u32) -> Weight { dispatch_weight::() + max_weight_of!(set_token_property_permissions(amount)) } @@ -126,24 +122,12 @@ where dispatch_weight::() + max_weight_of!(burn_from()) } - fn burn_recursively_self_raw() -> Weight { - max_weight_of!(burn_recursively_self_raw()) - } - - fn burn_recursively_breadth_raw(amount: u32) -> Weight { - max_weight_of!(burn_recursively_breadth_raw(amount)) - } - - fn token_owner() -> Weight { - max_weight_of!(token_owner()) - } - fn set_allowance_for_all() -> Weight { - max_weight_of!(set_allowance_for_all()) + dispatch_weight::() + max_weight_of!(set_allowance_for_all()) } fn force_repair_item() -> Weight { - max_weight_of!(force_repair_item()) + dispatch_weight::() + max_weight_of!(force_repair_item()) } } From 5a68a95f44d7020a750834ab481a946db3e59c26 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 11 Oct 2023 20:58:38 +0200 Subject: [PATCH 120/143] fix: usage of nesting_budget in pallet-unique --- pallets/unique/src/lib.rs | 97 ++++++++++++++++----------- primitives/data-structs/src/budget.rs | 2 +- 2 files changed, 59 insertions(+), 40 deletions(-) diff --git a/pallets/unique/src/lib.rs b/pallets/unique/src/lib.rs index 00f3fea64e..d88fc872f4 100644 --- a/pallets/unique/src/lib.rs +++ b/pallets/unique/src/lib.rs @@ -93,8 +93,7 @@ pub mod pallet { use frame_system::{ensure_root, ensure_signed}; use pallet_common::{ dispatch::{dispatch_tx, CollectionDispatch}, - CollectionHandle, CommonCollectionOperations, CommonWeightInfo, Pallet as PalletCommon, - RefungibleExtensionsWeightInfo, + CollectionHandle, CommonWeightInfo, Pallet as PalletCommon, RefungibleExtensionsWeightInfo, }; use pallet_evm::account::CrossAccountId; use pallet_structure::weights::WeightInfo as StructureWeightInfo; @@ -682,9 +681,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.create_item(sender, owner, data, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.create_item(sender, owner, data, &budget) + }), + budget, + ) } /// Create multiple items within a collection. @@ -717,9 +719,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.create_multiple_items(sender, owner, items_data, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.create_multiple_items(sender, owner, items_data, &budget) + }), + budget, + ) } /// Add or change collection properties. 
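The refund pattern applied in the hunks around here boils down to simple arithmetic: the extrinsic is charged `find_parent` for the full nesting budget pre-dispatch, and after the call only the parent lookups that actually happened are kept in the actual weight. A minimal standalone sketch of that adjustment, using plain u64 values in place of `Weight` and a hypothetical function name:

// Simplified model of the post-dispatch adjustment done by `refund_nesting_budget`:
// the inner collection dispatch reports its own actual weight, and the parent
// lookups that were really performed are added on top of it.
fn adjusted_actual_weight(
    inner_actual_weight: u64, // actual weight reported by the collection dispatch
    find_parent_weight: u64,  // benchmarked cost of one `find_parent` lookup
    nesting_budget: u32,      // maximum depth charged pre-dispatch (5 in this pallet)
    refund_amount: u32,       // part of the budget left unconsumed after the call
) -> u64 {
    let consumed = nesting_budget.saturating_sub(refund_amount);
    inner_actual_weight + find_parent_weight * u64::from(consumed)
}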
@@ -809,9 +814,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.set_token_properties(sender, token_id, properties, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.set_token_properties(sender, token_id, properties, &budget) + }), + budget, + ) } /// Delete specified token properties. Currently properties only work with NFTs. @@ -842,9 +850,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.delete_token_properties(sender, token_id, property_keys, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.delete_token_properties(sender, token_id, property_keys, &budget) + }), + budget, + ) } /// Add or change token property permissions of a collection. @@ -903,9 +914,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.create_multiple_items_ex(sender, data, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.create_multiple_items_ex(sender, data, &budget) + }), + budget, + ) } /// Completely allow or disallow transfers for a particular collection. @@ -1012,9 +1026,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.burn_from(sender, from, item_id, value, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.burn_from(sender, from, item_id, value, &budget) + }), + budget, + ) } /// Change ownership of the token. @@ -1050,9 +1067,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.transfer(sender, recipient, item_id, value, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.transfer(sender, recipient, item_id, value, &budget) + }), + budget, + ) } /// Allow a non-permissioned address to transfer or burn an item. @@ -1156,9 +1176,12 @@ pub mod pallet { let sender = T::CrossAccountId::from_sub(ensure_signed(origin)?); let budget = Self::structure_nesting_budget(); - Self::dispatch_tx_with_nesting_budget(collection_id, &budget, |d| { - d.transfer_from(sender, from, recipient, item_id, value, &budget) - }) + Self::refund_nesting_budget( + dispatch_tx::(collection_id, |d| { + d.transfer_from(sender, from, recipient, item_id, value, &budget) + }), + budget, + ) } /// Set specific limits of a collection. Empty, or None fields mean chain default. 
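The hunk below implements the other half of the scheme: after dispatch, `refund_nesting_budget` reads how much of the budget is left (`refund_amount`) and adds only the consumed levels to `actual_weight`, so the unused portion of the pre-charged weight is effectively returned to the caller. A standalone sketch of that arithmetic, again with plain u64 stand-ins for the real `Weight` and `T::StructureWeightInfo` types (covering only the case where the inner dispatch reported an actual weight):

// Sketch: the post-dispatch correction applied to PostDispatchInfo::actual_weight.
fn corrected_actual_weight(
    base_actual_weight: u64,
    find_parent_weight: u64,
    nesting_budget: u32,
    refund_amount: u32,
) -> u64 {
    // Levels actually traversed = full budget minus what is left over.
    let consumed = nesting_budget - refund_amount;
    base_actual_weight + find_parent_weight * consumed as u64
}

fn main() {
    // With a budget of 5 levels and 3 left unused, only 2 lookups are billed.
    assert_eq!(corrected_actual_weight(1_000, 100, 5, 3), 1_200);
}
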
@@ -1359,22 +1382,16 @@ pub mod pallet { budget::Value::new(Self::nesting_budget()) } - fn nesting_budget_weight(value: &budget::Value) -> Weight { - T::StructureWeightInfo::find_parent().saturating_mul(value.remaining() as u64) - } - fn nesting_budget_predispatch_weight() -> Weight { - Self::nesting_budget_weight(&Self::structure_nesting_budget()) + T::StructureWeightInfo::find_parent().saturating_mul(Self::nesting_budget() as u64) } - pub fn dispatch_tx_with_nesting_budget< - C: FnOnce(&dyn CommonCollectionOperations) -> DispatchResultWithPostInfo, - >( - collection: CollectionId, - budget: &budget::Value, - call: C, + pub fn refund_nesting_budget( + mut result: DispatchResultWithPostInfo, + budget: budget::Value, ) -> DispatchResultWithPostInfo { - let mut result = dispatch_tx::(collection, call); + let refund_amount = budget.refund_amount(); + let consumed = Self::nesting_budget() - refund_amount; match &mut result { Ok(PostDispatchInfo { @@ -1387,7 +1404,9 @@ pub mod pallet { .. }, .. - }) => *weight += Self::nesting_budget_weight(budget), + }) => { + *weight += T::StructureWeightInfo::find_parent().saturating_mul(consumed as u64) + } _ => {} } diff --git a/primitives/data-structs/src/budget.rs b/primitives/data-structs/src/budget.rs index c71c79808f..80c4ce3565 100644 --- a/primitives/data-structs/src/budget.rs +++ b/primitives/data-structs/src/budget.rs @@ -22,7 +22,7 @@ impl Value { pub fn new(v: u32) -> Self { Self(Cell::new(v)) } - pub fn remaining(&self) -> u32 { + pub fn refund_amount(self) -> u32 { self.0.get() } } From 5ec9dbee0705c3188e36da143cb9445549a79afa Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 11 Oct 2023 21:45:51 +0200 Subject: [PATCH 121/143] refactor: LazyValue without generic Fn --- pallets/common/src/lib.rs | 192 ++++++++++++++++---------------------- 1 file changed, 78 insertions(+), 114 deletions(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index 8248a84329..b2c98c5314 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -53,10 +53,12 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +use alloc::boxed::Box; use core::{ marker::PhantomData, ops::{Deref, DerefMut}, slice::from_ref, + unreachable, }; use evm_coder::ToLog; @@ -871,63 +873,81 @@ pub mod pallet { >; } +enum LazyValueState<'a, T> { + Pending(Box T + 'a>), + InProgress(PhantomData>), + Computed(T), +} + /// Value representation with delayed initialization time. -pub struct LazyValue { - value: Option, - f: Option, +pub struct LazyValue<'a, T> { + state: LazyValueState<'a, T>, } -impl T> LazyValue { +impl<'a, T> LazyValue<'a, T> { /// Create a new LazyValue. - pub fn new(f: F) -> Self { + pub fn new(f: impl FnOnce() -> T + 'a) -> Self { Self { - value: None, - f: Some(f), + state: LazyValueState::Pending(Box::new(f)), } } /// Get the value. If it is called the first time, the value will be initialized. pub fn value(&mut self) -> &T { self.force_value(); - self.value.as_ref().unwrap() + self.value_mut() } /// Get the value. If it is called the first time, the value will be initialized. pub fn value_mut(&mut self) -> &mut T { self.force_value(); - self.value.as_mut().unwrap() + + if let LazyValueState::Computed(value) = &mut self.state { + value + } else { + unreachable!() + } } fn into_inner(mut self) -> T { self.force_value(); - self.value.unwrap() + if let LazyValueState::Computed(value) = self.state { + value + } else { + unreachable!() + } } /// Is value initialized? 
pub fn has_value(&self) -> bool { - self.value.is_some() + matches!(self.state, LazyValueState::Computed(_)) } fn force_value(&mut self) { - if self.value.is_none() { - self.value = Some(self.f.take().unwrap()()) + use LazyValueState::*; + + if self.has_value() { + return; + } + + match sp_std::mem::replace(&mut self.state, InProgress(PhantomData)) { + Pending(f) => self.state = Computed(f()), + _ => { + // Computed is ruled out by the above condition + // InProgress is ruled out by not implementing Sync and absence of recursion + unreachable!() + } } } } -fn check_token_permissions( +fn check_token_permissions( collection_admin_permitted: bool, token_owner_permitted: bool, - is_collection_admin: &mut LazyValue, - is_token_owner: &mut LazyValue, FTO>, - is_token_exist: &mut LazyValue, -) -> DispatchResult -where - T: Config, - FCA: FnOnce() -> bool, - FTO: FnOnce() -> Result, - FTE: FnOnce() -> bool, -{ + is_collection_admin: &mut LazyValue, + is_token_owner: &mut LazyValue>, + is_token_exist: &mut LazyValue, +) -> DispatchResult { if !(collection_admin_permitted && *is_collection_admin.value() || token_owner_permitted && (*is_token_owner.value())?) { @@ -2346,36 +2366,24 @@ impl From for Error { /// This type utilizes the lazy evaluation to avoid repeating the computation /// of several performance-heavy or PoV-heavy tasks, /// such as checking the indirect ownership or reading the token property permissions. -pub struct PropertyWriter<'a, WriterVariant, T, Handle, FIsAdmin, FPropertyPermissions> { +pub struct PropertyWriter<'a, WriterVariant, T, Handle> { collection: &'a Handle, - collection_lazy_info: PropertyWriterLazyCollectionInfo, + collection_lazy_info: PropertyWriterLazyCollectionInfo<'a>, _phantom: PhantomData<(T, WriterVariant)>, } -impl<'a, T, Handle, WriterVariant, FIsAdmin, FPropertyPermissions> - PropertyWriter<'a, WriterVariant, T, Handle, FIsAdmin, FPropertyPermissions> +impl<'a, T, Handle, WriterVariant> PropertyWriter<'a, WriterVariant, T, Handle> where T: Config, Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { - fn internal_write_token_properties( + fn internal_write_token_properties( &mut self, token_id: TokenId, - mut token_lazy_info: PropertyWriterLazyTokenInfo< - FCheckTokenExist, - FCheckTokenOwner, - FGetProperties, - >, + mut token_lazy_info: PropertyWriterLazyTokenInfo, properties_updates: impl Iterator)>, log: evm_coder::ethereum::Log, - ) -> DispatchResult - where - FCheckTokenExist: FnOnce() -> bool, - FCheckTokenOwner: FnOnce() -> Result, - FGetProperties: FnOnce() -> TokenProperties, - { + ) -> DispatchResult { for (key, value) in properties_updates { let permission = self .collection_lazy_info @@ -2400,7 +2408,7 @@ where collection_admin, token_owner, .. - } => check_token_permissions::( + } => check_token_permissions::( collection_admin, token_owner, &mut self.collection_lazy_info.is_collection_admin, @@ -2454,31 +2462,25 @@ where /// A helper structure for the [`PropertyWriter`] that holds /// the collection-related info. The info is loaded using lazy evaluation. /// This info is common for any token for which we write properties. 
-pub struct PropertyWriterLazyCollectionInfo { - is_collection_admin: LazyValue, - property_permissions: LazyValue, +pub struct PropertyWriterLazyCollectionInfo<'a> { + is_collection_admin: LazyValue<'a, bool>, + property_permissions: LazyValue<'a, PropertiesPermissionMap>, } /// A helper structure for the [`PropertyWriter`] that holds /// the token-related info. The info is loaded using lazy evaluation. -pub struct PropertyWriterLazyTokenInfo { - is_token_exist: LazyValue, - is_token_owner: LazyValue, FCheckTokenOwner>, - stored_properties: LazyValue, +pub struct PropertyWriterLazyTokenInfo<'a> { + is_token_exist: LazyValue<'a, bool>, + is_token_owner: LazyValue<'a, Result>, + stored_properties: LazyValue<'a, TokenProperties>, } -impl - PropertyWriterLazyTokenInfo -where - FCheckTokenExist: FnOnce() -> bool, - FCheckTokenOwner: FnOnce() -> Result, - FGetProperties: FnOnce() -> TokenProperties, -{ +impl<'a> PropertyWriterLazyTokenInfo<'a> { /// Create a lazy token info. pub fn new( - check_token_exist: FCheckTokenExist, - check_token_owner: FCheckTokenOwner, - get_token_properties: FGetProperties, + check_token_exist: impl FnOnce() -> bool + 'a, + check_token_owner: impl FnOnce() -> Result + 'a, + get_token_properties: impl FnOnce() -> TokenProperties + 'a, ) -> Self { Self { is_token_exist: LazyValue::new(check_token_exist), @@ -2496,14 +2498,7 @@ impl NewTokenPropertyWriter { pub fn new<'a, Handle>( collection: &'a Handle, sender: &'a T::CrossAccountId, - ) -> PropertyWriter< - 'a, - Self, - T, - Handle, - impl FnOnce() -> bool + 'a, - impl FnOnce() -> PropertiesPermissionMap + 'a, - > + ) -> PropertyWriter<'a, Self, T, Handle> where T: Config, Handle: CommonCollectionOperations + Deref>, @@ -2521,13 +2516,10 @@ impl NewTokenPropertyWriter { } } -impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> - PropertyWriter<'a, NewTokenPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> +impl<'a, T, Handle> PropertyWriter<'a, NewTokenPropertyWriter, T, Handle> where T: Config, Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { /// A function to write properties to a **newly created** token. pub fn write_token_properties( @@ -2570,14 +2562,7 @@ impl ExistingTokenPropertyWriter { pub fn new<'a, Handle>( collection: &'a Handle, sender: &'a T::CrossAccountId, - ) -> PropertyWriter< - 'a, - Self, - T, - Handle, - impl FnOnce() -> bool + 'a, - impl FnOnce() -> PropertiesPermissionMap + 'a, - > + ) -> PropertyWriter<'a, Self, T, Handle> where Handle: CommonCollectionOperations + Deref>, { @@ -2594,13 +2579,10 @@ impl ExistingTokenPropertyWriter { } } -impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> - PropertyWriter<'a, ExistingTokenPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> +impl<'a, T, Handle> PropertyWriter<'a, ExistingTokenPropertyWriter, T, Handle> where T: Config, Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { /// A function to write properties to an **already existing** token. pub fn write_token_properties( @@ -2643,14 +2625,12 @@ pub struct BenchmarkPropertyWriter(PhantomData); #[cfg(feature = "runtime-benchmarks")] impl BenchmarkPropertyWriter { /// Creates a [`PropertyWriter`] for benchmarking tokens properties writing. 
- pub fn new<'a, Handle, FIsAdmin, FPropertyPermissions>( + pub fn new<'a, Handle>( collection: &Handle, - collection_lazy_info: PropertyWriterLazyCollectionInfo, - ) -> PropertyWriter + collection_lazy_info: PropertyWriterLazyCollectionInfo, + ) -> PropertyWriter<'a, Self, T, Handle> where Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { PropertyWriter { collection, @@ -2663,10 +2643,7 @@ impl BenchmarkPropertyWriter { pub fn load_collection_info( collection_handle: &Handle, sender: &T::CrossAccountId, - ) -> PropertyWriterLazyCollectionInfo< - impl FnOnce() -> bool, - impl FnOnce() -> PropertiesPermissionMap, - > + ) -> PropertyWriterLazyCollectionInfo<'static> where Handle: Deref>, { @@ -2683,11 +2660,7 @@ impl BenchmarkPropertyWriter { pub fn load_token_properties( collection: &Handle, token_id: TokenId, - ) -> PropertyWriterLazyTokenInfo< - impl FnOnce() -> bool, - impl FnOnce() -> Result, - impl FnOnce() -> TokenProperties, - > + ) -> PropertyWriterLazyTokenInfo where Handle: CommonCollectionOperations, { @@ -2704,13 +2677,10 @@ impl BenchmarkPropertyWriter { } #[cfg(feature = "runtime-benchmarks")] -impl<'a, T, Handle, FIsAdmin, FPropertyPermissions> - PropertyWriter<'a, BenchmarkPropertyWriter, T, Handle, FIsAdmin, FPropertyPermissions> +impl<'a, T, Handle> PropertyWriter<'a, BenchmarkPropertyWriter, T, Handle> where T: Config, Handle: CommonCollectionOperations + Deref>, - FIsAdmin: FnOnce() -> bool, - FPropertyPermissions: FnOnce() -> PropertiesPermissionMap, { /// A function to benchmark the writing of token properties. pub fn write_token_properties( @@ -2826,20 +2796,14 @@ pub mod tests { /* 15*/ TestCase::new(1, 1, 1, 1, 0), ]; - pub fn check_token_permissions( + pub fn check_token_permissions( collection_admin_permitted: bool, token_owner_permitted: bool, - is_collection_admin: &mut LazyValue, - check_token_ownership: &mut LazyValue, FTO>, - check_token_existence: &mut LazyValue, - ) -> DispatchResult - where - T: Config, - FCA: FnOnce() -> bool, - FTO: FnOnce() -> Result, - FTE: FnOnce() -> bool, - { - crate::check_token_permissions::( + is_collection_admin: &mut LazyValue, + check_token_ownership: &mut LazyValue>, + check_token_existence: &mut LazyValue, + ) -> DispatchResult { + crate::check_token_permissions::( collection_admin_permitted, token_owner_permitted, is_collection_admin, From da2e878d93b2e346d24ce25e108860425188ef0a Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 12 Oct 2023 14:15:19 +0200 Subject: [PATCH 122/143] fix: properties consumed space tests --- tests/src/util/playgrounds/unique.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index 24bfc0d8d6..c95c22ff91 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -3121,7 +3121,7 @@ export class UniqueNFTCollection extends UniqueBaseCollection { const api = this.helper.getApi(); const props = (await api.query.nonfungible.tokenProperties(this.collectionId, tokenId)).toJSON(); - return (props! as any).consumedSpace; + return (props != null) ? 
(props as any).consumedSpace : 0; } async transferToken(signer: TSigner, tokenId: number, addressObj: ICrossAccountId) { @@ -3226,7 +3226,7 @@ export class UniqueRFTCollection extends UniqueBaseCollection { const api = this.helper.getApi(); const props = (await api.query.refungible.tokenProperties(this.collectionId, tokenId)).toJSON(); - return (props! as any).consumedSpace; + return (props != null) ? (props as any).consumedSpace : 0; } async transferToken(signer: TSigner, tokenId: number, addressObj: ICrossAccountId, amount = 1n) { From 5464b28967c4761ea2b643d0c9c9229212938f08 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 12 Oct 2023 14:30:12 +0200 Subject: [PATCH 123/143] fix: evm errors begin with a lower-case letter --- tests/src/eth/nativeFungible.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/src/eth/nativeFungible.test.ts b/tests/src/eth/nativeFungible.test.ts index ea8047bcc8..0c8e953daa 100644 --- a/tests/src/eth/nativeFungible.test.ts +++ b/tests/src/eth/nativeFungible.test.ts @@ -33,7 +33,7 @@ describe('NativeFungible: ERC20 calls', () => { const collectionAddress = helper.ethAddress.fromCollectionId(0); const contract = await helper.ethNativeContract.collection(collectionAddress, 'ft', owner); - await expect(contract.methods.approve(spender, 100).call({from: owner})).to.be.rejectedWith('Approve not supported'); + await expect(contract.methods.approve(spender, 100).call({from: owner})).to.be.rejectedWith('approve not supported'); }); itEth('balanceOf()', async ({helper}) => { @@ -170,4 +170,4 @@ describe('NativeFungible: ERC20UniqueExtensions calls', () => { await expect(contract.methods.transferFromCross(receiver, receiver, 50).call({from: owner.eth})).to.be.rejectedWith('no permission'); }); -}); \ No newline at end of file +}); From aae21d3640bbe4b36788a00db3adb1ea6fbdfad3 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Thu, 12 Oct 2023 14:55:38 +0200 Subject: [PATCH 124/143] fix: consumedSpace fallback --- tests/src/util/playgrounds/unique.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/src/util/playgrounds/unique.ts b/tests/src/util/playgrounds/unique.ts index c95c22ff91..3f21a2cadc 100644 --- a/tests/src/util/playgrounds/unique.ts +++ b/tests/src/util/playgrounds/unique.ts @@ -3119,9 +3119,9 @@ export class UniqueNFTCollection extends UniqueBaseCollection { async getTokenPropertiesConsumedSpace(tokenId: number): Promise { const api = this.helper.getApi(); - const props = (await api.query.nonfungible.tokenProperties(this.collectionId, tokenId)).toJSON(); + const props = (await api.query.nonfungible.tokenProperties(this.collectionId, tokenId)).toJSON() as any; - return (props != null) ? (props as any).consumedSpace : 0; + return props?.consumedSpace ?? 0; } async transferToken(signer: TSigner, tokenId: number, addressObj: ICrossAccountId) { @@ -3224,9 +3224,9 @@ export class UniqueRFTCollection extends UniqueBaseCollection { async getTokenPropertiesConsumedSpace(tokenId: number): Promise { const api = this.helper.getApi(); - const props = (await api.query.refungible.tokenProperties(this.collectionId, tokenId)).toJSON(); + const props = (await api.query.refungible.tokenProperties(this.collectionId, tokenId)).toJSON() as any; - return (props != null) ? (props as any).consumedSpace : 0; + return props?.consumedSpace ?? 
0; } async transferToken(signer: TSigner, tokenId: number, addressObj: ICrossAccountId, amount = 1n) { From 742862849b4bfeaa04c5439268d2e42e63712a20 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 12:39:15 +0200 Subject: [PATCH 125/143] fix: EVM mint with properties, minor improvements --- pallets/common/src/benchmarking.rs | 32 +-- pallets/common/src/erc.rs | 4 +- pallets/common/src/lib.rs | 4 +- pallets/nonfungible/src/benchmarking.rs | 252 +++--------------------- pallets/nonfungible/src/common.rs | 33 ++-- pallets/nonfungible/src/erc.rs | 26 ++- pallets/refungible/src/benchmarking.rs | 210 +++----------------- pallets/refungible/src/common.rs | 50 ++--- pallets/refungible/src/erc.rs | 49 +++-- 9 files changed, 148 insertions(+), 512 deletions(-) diff --git a/pallets/common/src/benchmarking.rs b/pallets/common/src/benchmarking.rs index 1945de7469..bb4bef2097 100644 --- a/pallets/common/src/benchmarking.rs +++ b/pallets/common/src/benchmarking.rs @@ -29,9 +29,8 @@ use sp_runtime::{traits::Zero, DispatchError}; use sp_std::{vec, vec::Vec}; use up_data_structs::{ AccessMode, CollectionId, CollectionMode, CollectionPermissions, CreateCollectionData, - NestingPermissions, PropertiesPermissionMap, Property, PropertyKey, PropertyValue, - MAX_COLLECTION_DESCRIPTION_LENGTH, MAX_COLLECTION_NAME_LENGTH, MAX_PROPERTIES_PER_ITEM, - MAX_TOKEN_PREFIX_LENGTH, + NestingPermissions, Property, PropertyKey, PropertyValue, MAX_COLLECTION_DESCRIPTION_LENGTH, + MAX_COLLECTION_NAME_LENGTH, MAX_PROPERTIES_PER_ITEM, MAX_TOKEN_PREFIX_LENGTH, }; use crate::{BenchmarkPropertyWriter, CollectionHandle, Config, Pallet}; @@ -195,31 +194,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn delete_collection_properties( - b: Linear<0, MAX_PROPERTIES_PER_ITEM>, - ) -> Result<(), BenchmarkError> { - bench_init! { - owner: sub; collection: collection(owner); - owner: cross_from_sub; - }; - let props = (0..b) - .map(|p| Property { - key: property_key(p as usize), - value: property_value(), - }) - .collect::>(); - >::set_collection_properties(&collection, &owner, props.into_iter())?; - let to_delete = (0..b).map(|p| property_key(p as usize)).collect::>(); - - #[block] - { - >::delete_collection_properties(&collection, &owner, to_delete.into_iter())?; - } - - Ok(()) - } - #[benchmark] fn check_accesslist() -> Result<(), BenchmarkError> { bench_init! { @@ -253,7 +227,7 @@ mod benchmarks { } #[benchmark] - fn init_token_properties_common() -> Result<(), BenchmarkError> { + fn property_writer_load_collection_info() -> Result<(), BenchmarkError> { bench_init! { owner: sub; collection: collection(owner); sender: sub; diff --git a/pallets/common/src/erc.rs b/pallets/common/src/erc.rs index 4d0f946410..97e6ac831f 100644 --- a/pallets/common/src/erc.rs +++ b/pallets/common/src/erc.rs @@ -126,7 +126,7 @@ where /// /// @param key Property key. #[solidity(hide)] - #[weight(>::delete_collection_properties(1))] + #[weight(>::set_collection_properties(1))] fn delete_collection_property(&mut self, caller: Caller, key: String) -> Result<()> { let caller = T::CrossAccountId::from_eth(caller); let key = >::from(key) @@ -139,7 +139,7 @@ where /// Delete collection properties. /// /// @param keys Properties keys. 
- #[weight(>::delete_collection_properties(keys.len() as u32))] + #[weight(>::set_collection_properties(keys.len() as u32))] fn delete_collection_properties(&mut self, caller: Caller, keys: Vec) -> Result<()> { let caller = T::CrossAccountId::from_eth(caller); let keys = keys diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index b2c98c5314..c84c73ffed 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -2626,8 +2626,8 @@ pub struct BenchmarkPropertyWriter(PhantomData); impl BenchmarkPropertyWriter { /// Creates a [`PropertyWriter`] for benchmarking tokens properties writing. pub fn new<'a, Handle>( - collection: &Handle, - collection_lazy_info: PropertyWriterLazyCollectionInfo, + collection: &'a Handle, + collection_lazy_info: PropertyWriterLazyCollectionInfo<'a>, ) -> PropertyWriter<'a, Self, T, Handle> where Handle: CommonCollectionOperations + Deref>, diff --git a/pallets/nonfungible/src/benchmarking.rs b/pallets/nonfungible/src/benchmarking.rs index ec02d14b0a..93d206f679 100644 --- a/pallets/nonfungible/src/benchmarking.rs +++ b/pallets/nonfungible/src/benchmarking.rs @@ -18,7 +18,6 @@ use frame_benchmarking::v2::{account, benchmarks, BenchmarkError}; use pallet_common::{ bench_init, benchmarking::{create_collection_raw, property_key, property_value}, - CommonCollectionOperations, }; use sp_std::prelude::*; use up_data_structs::{ @@ -136,47 +135,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn burn_recursively_self_raw() -> Result<(), BenchmarkError> { - bench_init! { - owner: sub; collection: collection(owner); - sender: cross_from_sub(owner); burner: cross_sub; - }; - let item = create_max_item(&collection, &sender, burner.clone())?; - - #[block] - { - >::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?; - } - - Ok(()) - } - - #[benchmark] - fn burn_recursively_breadth_plus_self_plus_self_per_each_raw( - b: Linear<0, 200>, - ) -> Result<(), BenchmarkError> { - bench_init! { - owner: sub; collection: collection(owner); - sender: cross_from_sub(owner); burner: cross_sub; - }; - let item = create_max_item(&collection, &sender, burner.clone())?; - for _ in 0..b { - create_max_item( - &collection, - &sender, - T::CrossTokenAddressMapping::token_to_address(collection.id, item), - )?; - } - - #[block] - { - >::burn_recursively(&collection, &burner, item, &Unlimited, &Unlimited)?; - } - - Ok(()) - } - #[benchmark] fn transfer_raw() -> Result<(), BenchmarkError> { bench_init! 
{ @@ -262,116 +220,34 @@ mod benchmarks { { >::burn_from(&collection, &burner, &sender, item, &Unlimited)?; } - } - // set_token_properties { - // let b in 0..MAX_PROPERTIES_PER_ITEM; - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - // let perms = (0..b).map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }).collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - // let props = (0..b).map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }).collect::>(); - // let item = create_max_item(&collection, &owner, owner.clone())?; - // }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - - // load_token_properties { - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let item = create_max_item(&collection, &owner, owner.clone())?; - // }: { - // pallet_common::BenchmarkPropertyWriter::::load_token_properties( - // &collection, - // item, - // ) - // } - - // write_token_properties { - // let b in 0..MAX_PROPERTIES_PER_ITEM; - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let perms = (0..b).map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }).collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - // let props = (0..b).map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }).collect::>(); - // let item = create_max_item(&collection, &owner, owner.clone())?; - - // let lazy_collection_info = pallet_common::BenchmarkPropertyWriter::::load_collection_info( - // &collection, - // &owner, - // ); - // }: { - // let mut property_writer = pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); - - // property_writer.write_token_properties( - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )? - // } + Ok(()) + } #[benchmark] - fn set_token_property_permissions( - b: Linear<0, MAX_PROPERTIES_PER_ITEM>, - ) -> Result<(), BenchmarkError> { + fn load_token_properties() -> Result<(), BenchmarkError> { bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b) - .map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: false, - token_owner: false, - }, - }) - .collect::>(); + + let item = create_max_item(&collection, &owner, owner.clone())?; #[block] { - >::set_token_property_permissions(&collection, &owner, perms)?; + pallet_common::BenchmarkPropertyWriter::::load_token_properties(&collection, item); } Ok(()) } #[benchmark] - fn set_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + fn write_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { bench_init! 
{ owner: sub; collection: collection(owner); owner: cross_from_sub; }; + let perms = (0..b) .map(|k| PropertyKeyPermission { key: property_key(k as usize), @@ -391,71 +267,29 @@ mod benchmarks { .collect::>(); let item = create_max_item(&collection, &owner, owner.clone())?; + let lazy_collection_info = + pallet_common::BenchmarkPropertyWriter::::load_collection_info(&collection, &owner); + #[block] { - >::set_token_properties( - &collection, - &owner, + let mut property_writer = + pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + property_writer.write_token_properties( item, props.into_iter(), - &Unlimited, + crate::erc::ERC721TokenEvent::TokenChanged { + token_id: item.into(), + } + .to_log(T::ContractAddress::get()), )?; } Ok(()) } - // TODO: - #[benchmark] - fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { - // bench_init! { - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let perms = (0..b) - // .map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }) - // .collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - #[block] - {} - // let props = (0..b) - // .map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }) - // .collect::>(); - // let item = create_max_item(&collection, &owner, owner.clone())?; - - // let (is_collection_admin, property_permissions) = - // load_is_admin_and_property_permissions(&collection, &owner); - // #[block] - // { - // let mut property_writer = - // pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); - - // property_writer.write_token_properties( - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )?; - // } - - Ok(()) - } - #[benchmark] - fn delete_token_properties( + fn set_token_property_permissions( b: Linear<0, MAX_PROPERTIES_PER_ITEM>, ) -> Result<(), BenchmarkError> { bench_init! { @@ -466,54 +300,16 @@ mod benchmarks { .map(|k| PropertyKeyPermission { key: property_key(k as usize), permission: PropertyPermission { - mutable: true, - collection_admin: true, - token_owner: true, + mutable: false, + collection_admin: false, + token_owner: false, }, }) .collect::>(); - >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b) - .map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }) - .collect::>(); - let item = create_max_item(&collection, &owner, owner.clone())?; - >::set_token_properties( - &collection, - &owner, - item, - props.into_iter(), - &Unlimited, - )?; - let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); #[block] { - >::delete_token_properties( - &collection, - &owner, - item, - to_delete.into_iter(), - &Unlimited, - )?; - } - - Ok(()) - } - - #[benchmark] - fn token_owner() -> Result<(), BenchmarkError> { - bench_init! 
{ - owner: sub; collection: collection(owner); - owner: cross_from_sub; - }; - let item = create_max_item(&collection, &owner, owner.clone())?; - - #[block] - { - collection.token_owner(item).unwrap(); + >::set_token_property_permissions(&collection, &owner, perms)?; } Ok(()) diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index 7fe032d30e..3e3c94cdce 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -39,24 +39,21 @@ pub struct CommonWeights(PhantomData); impl CommonWeightInfo for CommonWeights { fn create_multiple_items_ex(data: &CreateItemExData) -> Weight { match data { - CreateItemExData::NFT(t) => >::create_multiple_items_ex(t.len() as u32) - .saturating_add(write_token_properties_total_weight::( - t.iter().map(|t| t.properties.len() as u32), - >::write_token_properties, - )), + CreateItemExData::NFT(t) => mint_with_props_weight::( + >::create_multiple_items_ex(t.len() as u32), + t.iter().map(|t| t.properties.len() as u32), + ), _ => Weight::zero(), } } fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { - >::create_multiple_items(data.len() as u32).saturating_add( - write_token_properties_total_weight::( - data.iter().map(|t| match t { - up_data_structs::CreateItemData::NFT(n) => n.properties.len() as u32, - _ => 0, - }), - >::write_token_properties, - ), + mint_with_props_weight::( + >::create_multiple_items(data.len() as u32), + data.iter().map(|t| match t { + up_data_structs::CreateItemData::NFT(n) => n.properties.len() as u32, + _ => 0, + }), ) } @@ -113,6 +110,16 @@ impl CommonWeightInfo for CommonWeights { } } +pub(crate) fn mint_with_props_weight( + create_no_data_weight: Weight, + tokens: impl Iterator + Clone, +) -> Weight { + create_no_data_weight.saturating_add(write_token_properties_total_weight::( + tokens, + >::write_token_properties, + )) +} + fn map_create_data( data: up_data_structs::CreateItemData, to: &T::CrossAccountId, diff --git a/pallets/nonfungible/src/erc.rs b/pallets/nonfungible/src/erc.rs index 3ad3e393aa..81c77bce9e 100644 --- a/pallets/nonfungible/src/erc.rs +++ b/pallets/nonfungible/src/erc.rs @@ -49,8 +49,10 @@ use up_data_structs::{ }; use crate::{ - common::CommonWeights, weights::WeightInfo, AccountBalance, Config, CreateItemData, - NonfungibleHandle, Pallet, SelfWeightOf, TokenData, TokenProperties, TokensMinted, + common::{mint_with_props_weight, CommonWeights}, + weights::WeightInfo, + AccountBalance, Config, CreateItemData, NonfungibleHandle, Pallet, SelfWeightOf, TokenData, + TokenProperties, TokensMinted, }; /// Nft events. @@ -620,7 +622,7 @@ impl NonfungibleHandle { /// @param tokenUri Token URI that would be stored in the NFT properties /// @return uint256 The id of the newly minted token #[solidity(rename_selector = "mintWithTokenURI")] - #[weight(>::create_item() + >::set_token_properties(1))] + #[weight(mint_with_props_weight::(>::create_item(), [1].into_iter()))] fn mint_with_token_uri( &mut self, caller: Caller, @@ -642,7 +644,7 @@ impl NonfungibleHandle { /// @param tokenId ID of the minted NFT /// @param tokenUri Token URI that would be stored in the NFT properties #[solidity(hide, rename_selector = "mintWithTokenURI")] - #[weight(>::create_item() + >::set_token_properties(1))] + #[weight(mint_with_props_weight::(>::create_item(), [1].into_iter()))] fn mint_with_token_uri_check_id( &mut self, caller: Caller, @@ -974,7 +976,12 @@ where /// @notice Function to mint a token. 
/// @param data Array of pairs of token owner and token's properties for minted token - #[weight(>::create_multiple_items(data.len() as u32) + >::set_token_properties(data.len() as u32))] + #[weight( + mint_with_props_weight::( + >::create_multiple_items_ex(data.len() as u32), + data.iter().map(|d| d.properties.len() as u32), + ) + )] fn mint_bulk_cross(&mut self, caller: Caller, data: Vec) -> Result { let caller = T::CrossAccountId::from_eth(caller); @@ -1008,7 +1015,12 @@ where /// @param to The new owner /// @param tokens array of pairs of token ID and token URI for minted tokens #[solidity(hide, rename_selector = "mintBulkWithTokenURI")] - #[weight(>::create_multiple_items(tokens.len() as u32) + >::set_token_properties(tokens.len() as u32))] + #[weight( + mint_with_props_weight::( + >::create_multiple_items(tokens.len() as u32), + tokens.iter().map(|_| 1), + ) + )] fn mint_bulk_with_token_uri( &mut self, caller: Caller, @@ -1056,7 +1068,7 @@ where /// @param to The new owner crossAccountId /// @param properties Properties of minted token /// @return uint256 The id of the newly minted token - #[weight(>::create_item() + >::set_token_properties(properties.len() as u32))] + #[weight(mint_with_props_weight::(>::create_item(), [properties.len() as u32].into_iter()))] fn mint_cross( &mut self, caller: Caller, diff --git a/pallets/refungible/src/benchmarking.rs b/pallets/refungible/src/benchmarking.rs index 4008f668be..1daaf86250 100644 --- a/pallets/refungible/src/benchmarking.rs +++ b/pallets/refungible/src/benchmarking.rs @@ -421,114 +421,30 @@ mod benchmarks { Ok(()) } - // set_token_properties { - // let b in 0..MAX_PROPERTIES_PER_ITEM; - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - // let perms = (0..b).map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }).collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - // let props = (0..b).map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }).collect::>(); - // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - // }: {>::set_token_properties(&collection, &owner, item, props.into_iter(), &Unlimited)?} - - // load_token_properties { - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - // }: { - // pallet_common::BenchmarkPropertyWriter::::load_token_properties( - // &collection, - // item, - // ) - // } - - // write_token_properties { - // let b in 0..MAX_PROPERTIES_PER_ITEM; - // bench_init!{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let perms = (0..b).map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }).collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - // let props = (0..b).map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }).collect::>(); - // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - - // let lazy_collection_info = pallet_common::BenchmarkPropertyWriter::::load_collection_info( - // &collection, - // &owner, - // ); - // }: { - // 
let mut property_writer = pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); - - // property_writer.write_token_properties( - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )? - // } - #[benchmark] - fn set_token_property_permissions( - b: Linear<0, MAX_PROPERTIES_PER_ITEM>, - ) -> Result<(), BenchmarkError> { + fn load_token_properties() -> Result<(), BenchmarkError> { bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; - let perms = (0..b) - .map(|k| PropertyKeyPermission { - key: property_key(k as usize), - permission: PropertyPermission { - mutable: false, - collection_admin: false, - token_owner: false, - }, - }) - .collect::>(); + + let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; #[block] { - >::set_token_property_permissions(&collection, &owner, perms)?; + pallet_common::BenchmarkPropertyWriter::::load_token_properties(&collection, item); } Ok(()) } #[benchmark] - fn set_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { + fn write_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { bench_init! { owner: sub; collection: collection(owner); owner: cross_from_sub; }; + let perms = (0..b) .map(|k| PropertyKeyPermission { key: property_key(k as usize), @@ -548,73 +464,29 @@ mod benchmarks { .collect::>(); let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; + let lazy_collection_info = + pallet_common::BenchmarkPropertyWriter::::load_collection_info(&collection, &owner); + #[block] { - >::set_token_properties( - &collection, - &owner, + let mut property_writer = + pallet_common::BenchmarkPropertyWriter::new(&collection, lazy_collection_info); + + property_writer.write_token_properties( item, props.into_iter(), - &Unlimited, + crate::erc::ERC721TokenEvent::TokenChanged { + token_id: item.into(), + } + .to_log(T::ContractAddress::get()), )?; } Ok(()) } - // TODO: - #[benchmark] - fn init_token_properties(b: Linear<0, MAX_PROPERTIES_PER_ITEM>) -> Result<(), BenchmarkError> { - // bench_init! 
{ - // owner: sub; collection: collection(owner); - // owner: cross_from_sub; - // }; - - // let perms = (0..b) - // .map(|k| PropertyKeyPermission { - // key: property_key(k as usize), - // permission: PropertyPermission { - // mutable: false, - // collection_admin: true, - // token_owner: true, - // }, - // }) - // .collect::>(); - // >::set_token_property_permissions(&collection, &owner, perms)?; - - #[block] - {} - // let props = (0..b).map(|k| Property { - // key: property_key(k as usize), - // value: property_value(), - // }).collect::>(); - // let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - - // let (is_collection_admin, property_permissions) = load_is_admin_and_property_permissions(&collection, &owner) - // let mut property_writer = pallet_common::collection_info_loaded_property_writer( - // &collection, - // is_collection_admin, - // property_permissions, - // ); - - // #[block] - // { - // property_writer.write_token_properties( - // true, - // item, - // props.into_iter(), - // crate::erc::ERC721TokenEvent::TokenChanged { - // token_id: item.into(), - // } - // .to_log(T::ContractAddress::get()), - // )?; - // } - - Ok(()) - } - #[benchmark] - fn delete_token_properties( + fn set_token_property_permissions( b: Linear<0, MAX_PROPERTIES_PER_ITEM>, ) -> Result<(), BenchmarkError> { bench_init! { @@ -625,38 +497,16 @@ mod benchmarks { .map(|k| PropertyKeyPermission { key: property_key(k as usize), permission: PropertyPermission { - mutable: true, - collection_admin: true, - token_owner: true, + mutable: false, + collection_admin: false, + token_owner: false, }, }) .collect::>(); - >::set_token_property_permissions(&collection, &owner, perms)?; - let props = (0..b) - .map(|k| Property { - key: property_key(k as usize), - value: property_value(), - }) - .collect::>(); - let item = create_max_item(&collection, &owner, [(owner.clone(), 200)])?; - >::set_token_properties( - &collection, - &owner, - item, - props.into_iter(), - &Unlimited, - )?; - let to_delete = (0..b).map(|k| property_key(k as usize)).collect::>(); #[block] { - >::delete_token_properties( - &collection, - &owner, - item, - to_delete.into_iter(), - &Unlimited, - )?; + >::set_token_property_permissions(&collection, &owner, perms)?; } Ok(()) @@ -678,22 +528,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn token_owner() -> Result<(), BenchmarkError> { - bench_init! { - owner: sub; collection: collection(owner); - sender: cross_from_sub(owner); owner: cross_sub; - }; - let item = create_max_item(&collection, &sender, [(owner, 100)])?; - - #[block] - { - >::token_owner(collection.id, item).unwrap(); - } - - Ok(()) - } - #[benchmark] fn set_allowance_for_all() -> Result<(), BenchmarkError> { bench_init! { diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index bae6dc94a8..1adbe59c5f 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -47,35 +47,27 @@ macro_rules! 
max_weight_of { pub struct CommonWeights(PhantomData); impl CommonWeightInfo for CommonWeights { fn create_multiple_items(data: &[up_data_structs::CreateItemData]) -> Weight { - >::create_multiple_items(data.len() as u32).saturating_add( - write_token_properties_total_weight::( - data.iter().map(|data| match data { - up_data_structs::CreateItemData::ReFungible(rft_data) => { - rft_data.properties.len() as u32 - } - _ => 0, - }), - >::write_token_properties, - ), + mint_with_props_weight::( + >::create_multiple_items(data.len() as u32), + data.iter().map(|data| match data { + up_data_structs::CreateItemData::ReFungible(rft_data) => { + rft_data.properties.len() as u32 + } + _ => 0, + }), ) } fn create_multiple_items_ex(call: &CreateItemExData) -> Weight { match call { - CreateItemExData::RefungibleMultipleOwners(i) => { - >::create_multiple_items_ex_multiple_owners(i.users.len() as u32) - .saturating_add(write_token_properties_total_weight::( - [i.properties.len() as u32].into_iter(), - >::write_token_properties, - )) - } - CreateItemExData::RefungibleMultipleItems(i) => { - >::create_multiple_items_ex_multiple_items(i.len() as u32) - .saturating_add(write_token_properties_total_weight::( - i.iter().map(|d| d.properties.len() as u32), - >::write_token_properties, - )) - } + CreateItemExData::RefungibleMultipleOwners(i) => mint_with_props_weight::( + >::create_multiple_items_ex_multiple_owners(i.users.len() as u32), + [i.properties.len() as u32].into_iter(), + ), + CreateItemExData::RefungibleMultipleItems(i) => mint_with_props_weight::( + >::create_multiple_items_ex_multiple_items(i.len() as u32), + i.iter().map(|d| d.properties.len() as u32), + ), _ => Weight::zero(), } } @@ -138,6 +130,16 @@ impl CommonWeightInfo for CommonWeights { } } +pub(crate) fn mint_with_props_weight( + create_no_data_weight: Weight, + tokens: impl Iterator + Clone, +) -> Weight { + create_no_data_weight.saturating_add(write_token_properties_total_weight::( + tokens, + >::write_token_properties, + )) +} + fn map_create_data( data: up_data_structs::CreateItemData, to: &T::CrossAccountId, diff --git a/pallets/refungible/src/erc.rs b/pallets/refungible/src/erc.rs index 4515a45920..441d0bb9f8 100644 --- a/pallets/refungible/src/erc.rs +++ b/pallets/refungible/src/erc.rs @@ -50,8 +50,10 @@ use up_data_structs::{ }; use crate::{ - common::CommonWeights, weights::WeightInfo, AccountBalance, Balance, Config, CreateItemData, - Pallet, RefungibleHandle, SelfWeightOf, TokenProperties, TokensMinted, TotalSupply, + common::{mint_with_props_weight, CommonWeights}, + weights::WeightInfo, + AccountBalance, Balance, Config, CreateItemData, Pallet, RefungibleHandle, SelfWeightOf, + TokenProperties, TokensMinted, TotalSupply, }; frontier_contract! 
{ @@ -661,7 +663,7 @@ impl RefungibleHandle { /// @param tokenUri Token URI that would be stored in the NFT properties /// @return uint256 The id of the newly minted token #[solidity(rename_selector = "mintWithTokenURI")] - #[weight(>::create_item() + >::set_token_properties(1))] + #[weight(mint_with_props_weight::(>::create_item(), [1].into_iter()))] fn mint_with_token_uri( &mut self, caller: Caller, @@ -683,7 +685,7 @@ impl RefungibleHandle { /// @param tokenId ID of the minted RFT /// @param tokenUri Token URI that would be stored in the RFT properties #[solidity(hide, rename_selector = "mintWithTokenURI")] - #[weight(>::create_item() + >::set_token_properties(1))] + #[weight(mint_with_props_weight::(>::create_item(), [1].into_iter()))] fn mint_with_token_uri_check_id( &mut self, caller: Caller, @@ -1052,22 +1054,26 @@ where } /// @notice Function to mint a token. - /// @param tokenProperties Properties of minted token - #[weight(if token_properties.len() == 1 { - >::create_multiple_items_ex_multiple_owners(token_properties.iter().next().unwrap().owners.len() as u32) + /// @param tokensData Data of minted token(s) + #[weight(if tokens_data.len() == 1 { + let token_data = tokens_data.first().unwrap(); + + mint_with_props_weight::( + >::create_multiple_items_ex_multiple_owners(token_data.owners.len() as u32), + [token_data.properties.len() as u32].into_iter(), + ) } else { - >::create_multiple_items_ex_multiple_items(token_properties.len() as u32) - } + >::set_token_properties(token_properties.len() as u32))] - fn mint_bulk_cross( - &mut self, - caller: Caller, - token_properties: Vec, - ) -> Result { + mint_with_props_weight::( + >::create_multiple_items_ex_multiple_items(tokens_data.len() as u32), + tokens_data.iter().map(|d| d.properties.len() as u32), + ) + })] + fn mint_bulk_cross(&mut self, caller: Caller, tokens_data: Vec) -> Result { let caller = T::CrossAccountId::from_eth(caller); - let has_multiple_tokens = token_properties.len() > 1; + let has_multiple_tokens = tokens_data.len() > 1; - let mut create_rft_data = Vec::with_capacity(token_properties.len()); - for MintTokenData { owners, properties } in token_properties { + let mut create_rft_data = Vec::with_capacity(tokens_data.len()); + for MintTokenData { owners, properties } in tokens_data { let has_multiple_owners = owners.len() > 1; if has_multiple_tokens & has_multiple_owners { return Err( @@ -1108,7 +1114,12 @@ where /// @param to The new owner /// @param tokens array of pairs of token ID and token URI for minted tokens #[solidity(hide, rename_selector = "mintBulkWithTokenURI")] - #[weight(>::create_multiple_items(tokens.len() as u32) + >::set_token_properties(tokens.len() as u32))] + #[weight( + mint_with_props_weight::( + >::create_multiple_items(tokens.len() as u32), + tokens.iter().map(|_| 1), + ) + )] fn mint_bulk_with_token_uri( &mut self, caller: Caller, @@ -1162,7 +1173,7 @@ where /// @param to The new owner crossAccountId /// @param properties Properties of minted token /// @return uint256 The id of the newly minted token - #[weight(>::create_item() + >::set_token_properties(properties.len() as u32))] + #[weight(mint_with_props_weight::(>::create_item(), [properties.len() as u32].into_iter()))] fn mint_cross( &mut self, caller: Caller, From a46c1bd5f7aec4fc6ea16ad65031eaae04454b97 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 12:39:38 +0200 Subject: [PATCH 126/143] chore: temporary common/nft/rft weights --- pallets/common/src/weights.rs | 99 +-- pallets/nonfungible/src/weights.rs | 709 
++++++++------------ pallets/refungible/src/weights.rs | 999 +++++++++++++---------------- 3 files changed, 739 insertions(+), 1068 deletions(-) diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 8e47211d74..57a73dfdb0 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_common //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -34,7 +34,6 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_common. pub trait WeightInfo { fn set_collection_properties(b: u32, ) -> Weight; - fn delete_collection_properties(b: u32, ) -> Weight; fn check_accesslist() -> Weight; fn property_writer_load_collection_info() -> Weight; } @@ -42,108 +41,80 @@ pub trait WeightInfo { /// Weights for pallet_common using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionProperties` (r:1 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn set_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 2_840_000 picoseconds. - Weight::from_parts(1_988_405, 44457) - // Standard Error: 7_834 - .saturating_add(Weight::from_parts(3_053_965, 0).saturating_mul(b.into())) + // Minimum execution time: 4_560_000 picoseconds. + Weight::from_parts(28_643_440, 44457) + // Standard Error: 28_941 + .saturating_add(Weight::from_parts(18_277_422, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn delete_collection_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `303 + b * (33030 ±0)` - // Estimated: `44457` - // Minimum execution time: 2_770_000 picoseconds. 
- Weight::from_parts(2_940_000, 44457) - // Standard Error: 30_686 - .saturating_add(Weight::from_parts(9_801_835, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Common Allowlist (r:1 w:0) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::Allowlist` (r:1 w:0) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn check_accesslist() -> Weight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 2_830_000 picoseconds. - Weight::from_parts(2_950_000, 3535) + // Minimum execution time: 4_290_000 picoseconds. + Weight::from_parts(4_460_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Common IsAdmin (r:1 w:0) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: `Common::IsAdmin` (r:1 w:0) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:0) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) fn property_writer_load_collection_info() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 3_970_000 picoseconds. - Weight::from_parts(4_140_000, 20191) + // Minimum execution time: 6_100_000 picoseconds. + Weight::from_parts(6_350_000, 20191) .saturating_add(T::DbWeight::get().reads(2_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionProperties` (r:1 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn set_collection_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 2_840_000 picoseconds. - Weight::from_parts(1_988_405, 44457) - // Standard Error: 7_834 - .saturating_add(Weight::from_parts(3_053_965, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn delete_collection_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `303 + b * (33030 ±0)` - // Estimated: `44457` - // Minimum execution time: 2_770_000 picoseconds. - Weight::from_parts(2_940_000, 44457) - // Standard Error: 30_686 - .saturating_add(Weight::from_parts(9_801_835, 0).saturating_mul(b.into())) + // Minimum execution time: 4_560_000 picoseconds. 
+ Weight::from_parts(28_643_440, 44457) + // Standard Error: 28_941 + .saturating_add(Weight::from_parts(18_277_422, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common Allowlist (r:1 w:0) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::Allowlist` (r:1 w:0) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn check_accesslist() -> Weight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 2_830_000 picoseconds. - Weight::from_parts(2_950_000, 3535) + // Minimum execution time: 4_290_000 picoseconds. + Weight::from_parts(4_460_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Common IsAdmin (r:1 w:0) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) + /// Storage: `Common::IsAdmin` (r:1 w:0) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:0) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) fn property_writer_load_collection_info() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 3_970_000 picoseconds. - Weight::from_parts(4_140_000, 20191) + // Minimum execution time: 6_100_000 picoseconds. + Weight::from_parts(6_350_000, 20191) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs index 377e283cd2..82802a2b33 100644 --- a/pallets/nonfungible/src/weights.rs +++ b/pallets/nonfungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_nonfungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -37,19 +37,14 @@ pub trait WeightInfo { fn create_multiple_items(b: u32, ) -> Weight; fn create_multiple_items_ex(b: u32, ) -> Weight; fn burn_item() -> Weight; - fn burn_recursively_self_raw() -> Weight; - fn burn_recursively_breadth_plus_self_plus_self_per_each_raw(b: u32, ) -> Weight; fn transfer_raw() -> Weight; fn approve() -> Weight; fn approve_from() -> Weight; fn check_allowed_raw() -> Weight; fn burn_from() -> Weight; - fn set_token_property_permissions(b: u32, ) -> Weight; - fn set_token_properties(b: u32, ) -> Weight; fn load_token_properties() -> Weight; fn write_token_properties(b: u32, ) -> Weight; - fn delete_token_properties(b: u32, ) -> Weight; - fn token_owner() -> Weight; + fn set_token_property_permissions(b: u32, ) -> Weight; fn set_allowance_for_all() -> Weight; fn allowance_for_all() -> Weight; fn repair_item() -> Weight; @@ -58,331 +53,231 @@ pub trait WeightInfo { /// Weights for pallet_nonfungible using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_990_000 picoseconds. - Weight::from_parts(5_170_000, 3530) + // Minimum execution time: 15_410_000 picoseconds. 
+ Weight::from_parts(15_850_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:200) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:200) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:200) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:200) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 1_680_000 picoseconds. - Weight::from_parts(1_720_000, 3530) - // Standard Error: 674 - .saturating_add(Weight::from_parts(2_406_591, 0).saturating_mul(b.into())) + // Minimum execution time: 3_300_000 picoseconds. + Weight::from_parts(5_992_994, 3530) + // Standard Error: 4_478 + .saturating_add(Weight::from_parts(8_002_092, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) } - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:200 w:200) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:200) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:200) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:200 w:200) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:200) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:200) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. 
fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_680_000 picoseconds. - Weight::from_parts(1_720_000, 3481) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(3_418_983, 0).saturating_mul(b.into())) + // Minimum execution time: 3_300_000 picoseconds. + Weight::from_parts(3_980_000, 3481) + // Standard Error: 1_382 + .saturating_add(Weight::from_parts(11_259_286, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenChildren` (r:1 w:0) + /// Proof: `Nonfungible::TokenChildren` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:1 w:1) + /// Proof: `Nonfungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 10_700_000 picoseconds. - Weight::from_parts(11_180_000, 3530) + // Minimum execution time: 26_360_000 picoseconds. 
+ Weight::from_parts(26_850_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - fn burn_recursively_self_raw() -> Weight { - // Proof Size summary in bytes: - // Measured: `380` - // Estimated: `3530` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(13_910_000, 3530) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) - } - /// Storage: Nonfungible TokenChildren (r:401 w:200) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:201 w:201) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:2 w:2) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:201 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:201) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:201) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 200]`. - fn burn_recursively_breadth_plus_self_plus_self_per_each_raw(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1500 + b * (58 ±0)` - // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 13_500_000 picoseconds. 
- Weight::from_parts(13_830_000, 5874) - // Standard Error: 136_447 - .saturating_add(Weight::from_parts(43_149_279, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(6_u64)) - .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 5032).saturating_mul(b.into())) - } - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:2 w:2) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:2) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:2 w:2) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:2) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 8_440_000 picoseconds. - Weight::from_parts(8_680_000, 6070) + // Minimum execution time: 22_710_000 picoseconds. + Weight::from_parts(23_130_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_580_000 picoseconds. - Weight::from_parts(4_850_000, 3522) + // Minimum execution time: 11_520_000 picoseconds. 
+ Weight::from_parts(12_030_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 4_650_000 picoseconds. - Weight::from_parts(4_890_000, 3522) + // Minimum execution time: 11_570_000 picoseconds. + Weight::from_parts(12_139_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn check_allowed_raw() -> Weight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_630_000 picoseconds. - Weight::from_parts(2_760_000, 3522) + // Minimum execution time: 4_210_000 picoseconds. + Weight::from_parts(4_350_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenChildren` (r:1 w:0) + /// Proof: `Nonfungible::TokenChildren` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:1 w:1) + /// Proof: `Nonfungible::TokensBurnt` 
(`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 13_300_000 picoseconds. - Weight::from_parts(13_650_000, 3530) + // Minimum execution time: 32_230_000 picoseconds. + Weight::from_parts(33_210_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_property_permissions(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `20191` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(600_000, 20191) - // Standard Error: 23_117 - .saturating_add(Weight::from_parts(6_048_092, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `640 + b * (261 ±0)` - // Estimated: `36269` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(7_359_078, 36269) - // Standard Error: 9_052 - .saturating_add(Weight::from_parts(2_763_267, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Nonfungible TokenProperties (r:1 w:0) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:1 w:0) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn load_token_properties() -> Weight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_610_000 picoseconds. - Weight::from_parts(1_690_000, 36269) + // Minimum execution time: 3_180_000 picoseconds. 
+ Weight::from_parts(3_370_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(3_262_181, 0) - // Standard Error: 5_240 - .saturating_add(Weight::from_parts(2_426_582, 0).saturating_mul(b.into())) + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(3_567_990, 0) + // Standard Error: 24_013 + .saturating_add(Weight::from_parts(19_386_123, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. - fn delete_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `699 + b * (33291 ±0)` - // Estimated: `36269` - // Minimum execution time: 350_000 picoseconds. - Weight::from_parts(370_000, 36269) - // Standard Error: 29_081 - .saturating_add(Weight::from_parts(9_667_268, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - fn token_owner() -> Weight { + fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3522` - // Minimum execution time: 2_380_000 picoseconds. - Weight::from_parts(2_500_000, 3522) + // Measured: `314` + // Estimated: `20191` + // Minimum execution time: 1_460_000 picoseconds. + Weight::from_parts(1_530_000, 20191) + // Standard Error: 124_929 + .saturating_add(Weight::from_parts(28_397_581, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible CollectionAllowance (r:0 w:1) - /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Nonfungible::CollectionAllowance` (r:0 w:1) + /// Proof: `Nonfungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn set_allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_060_000 picoseconds. 
- Weight::from_parts(2_150_000, 0) + // Minimum execution time: 6_840_000 picoseconds. + Weight::from_parts(7_160_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible CollectionAllowance (r:1 w:0) - /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Nonfungible::CollectionAllowance` (r:1 w:0) + /// Proof: `Nonfungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_630_000 picoseconds. - Weight::from_parts(1_730_000, 3576) + // Minimum execution time: 3_630_000 picoseconds. + Weight::from_parts(3_780_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:1 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn repair_item() -> Weight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_700_000 picoseconds. - Weight::from_parts(1_780_000, 36269) + // Minimum execution time: 3_280_000 picoseconds. + Weight::from_parts(3_480_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -390,331 +285,231 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 4_990_000 picoseconds. - Weight::from_parts(5_170_000, 3530) + // Minimum execution time: 15_410_000 picoseconds. 
+ Weight::from_parts(15_850_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:200) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:200) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:200) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:200) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 1_680_000 picoseconds. - Weight::from_parts(1_720_000, 3530) - // Standard Error: 674 - .saturating_add(Weight::from_parts(2_406_591, 0).saturating_mul(b.into())) + // Minimum execution time: 3_300_000 picoseconds. 
+ Weight::from_parts(5_992_994, 3530) + // Standard Error: 4_478 + .saturating_add(Weight::from_parts(8_002_092, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) } - /// Storage: Nonfungible TokensMinted (r:1 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:200 w:200) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:0 w:200) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:200) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokensMinted` (r:1 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:200 w:200) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:0 w:200) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:200) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_680_000 picoseconds. - Weight::from_parts(1_720_000, 3481) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(3_418_983, 0).saturating_mul(b.into())) + // Minimum execution time: 3_300_000 picoseconds. 
+ Weight::from_parts(3_980_000, 3481) + // Standard Error: 1_382 + .saturating_add(Weight::from_parts(11_259_286, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenChildren` (r:1 w:0) + /// Proof: `Nonfungible::TokenChildren` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:1 w:1) + /// Proof: `Nonfungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 10_700_000 picoseconds. - Weight::from_parts(11_180_000, 3530) + // Minimum execution time: 26_360_000 picoseconds. 
+ Weight::from_parts(26_850_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - fn burn_recursively_self_raw() -> Weight { - // Proof Size summary in bytes: - // Measured: `380` - // Estimated: `3530` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(13_910_000, 3530) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - } - /// Storage: Nonfungible TokenChildren (r:401 w:200) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:201 w:201) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:2 w:2) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:201 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:201) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:201) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 200]`. - fn burn_recursively_breadth_plus_self_plus_self_per_each_raw(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1500 + b * (58 ±0)` - // Estimated: `5874 + b * (5032 ±0)` - // Minimum execution time: 13_500_000 picoseconds. 
- Weight::from_parts(13_830_000, 5874) - // Standard Error: 136_447 - .saturating_add(Weight::from_parts(43_149_279, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(b.into()))) - .saturating_add(RocksDbWeight::get().writes(6_u64)) - .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) - .saturating_add(Weight::from_parts(0, 5032).saturating_mul(b.into())) - } - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:2 w:2) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:2) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:2 w:2) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:2) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 8_440_000 picoseconds. - Weight::from_parts(8_680_000, 6070) + // Minimum execution time: 22_710_000 picoseconds. + Weight::from_parts(23_130_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 4_580_000 picoseconds. - Weight::from_parts(4_850_000, 3522) + // Minimum execution time: 11_520_000 picoseconds. 
+ Weight::from_parts(12_030_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 4_650_000 picoseconds. - Weight::from_parts(4_890_000, 3522) + // Minimum execution time: 11_570_000 picoseconds. + Weight::from_parts(12_139_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible Allowance (r:1 w:0) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Nonfungible::Allowance` (r:1 w:0) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn check_allowed_raw() -> Weight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 2_630_000 picoseconds. - Weight::from_parts(2_760_000, 3522) + // Minimum execution time: 4_210_000 picoseconds. + Weight::from_parts(4_350_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible Allowance (r:1 w:1) - /// Proof: Nonfungible Allowance (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:1) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenChildren (r:1 w:0) - /// Proof: Nonfungible TokenChildren (max_values: None, max_size: Some(41), added: 2516, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:1 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible AccountBalance (r:1 w:1) - /// Proof: Nonfungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Nonfungible Owned (r:0 w:1) - /// Proof: Nonfungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::Allowance` (r:1 w:1) + /// Proof: `Nonfungible::Allowance` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:1) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenChildren` (r:1 w:0) + /// Proof: `Nonfungible::TokenChildren` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:1 w:1) + /// Proof: `Nonfungible::TokensBurnt` 
(`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::AccountBalance` (r:1 w:1) + /// Proof: `Nonfungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::Owned` (r:0 w:1) + /// Proof: `Nonfungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 13_300_000 picoseconds. - Weight::from_parts(13_650_000, 3530) + // Minimum execution time: 32_230_000 picoseconds. + Weight::from_parts(33_210_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_property_permissions(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `20191` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(600_000, 20191) - // Standard Error: 23_117 - .saturating_add(Weight::from_parts(6_048_092, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `640 + b * (261 ±0)` - // Estimated: `36269` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(7_359_078, 36269) - // Standard Error: 9_052 - .saturating_add(Weight::from_parts(2_763_267, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Nonfungible TokenProperties (r:1 w:0) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:1 w:0) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn load_token_properties() -> Weight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_610_000 picoseconds. - Weight::from_parts(1_690_000, 36269) + // Minimum execution time: 3_180_000 picoseconds. 
+ Weight::from_parts(3_370_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:0 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(3_262_181, 0) - // Standard Error: 5_240 - .saturating_add(Weight::from_parts(2_426_582, 0).saturating_mul(b.into())) + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(3_567_990, 0) + // Standard Error: 24_013 + .saturating_add(Weight::from_parts(19_386_123, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. - fn delete_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `699 + b * (33291 ±0)` - // Estimated: `36269` - // Minimum execution time: 350_000 picoseconds. - Weight::from_parts(370_000, 36269) - // Standard Error: 29_081 - .saturating_add(Weight::from_parts(9_667_268, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - fn token_owner() -> Weight { + fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3522` - // Minimum execution time: 2_380_000 picoseconds. - Weight::from_parts(2_500_000, 3522) + // Measured: `314` + // Estimated: `20191` + // Minimum execution time: 1_460_000 picoseconds. + Weight::from_parts(1_530_000, 20191) + // Standard Error: 124_929 + .saturating_add(Weight::from_parts(28_397_581, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible CollectionAllowance (r:0 w:1) - /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Nonfungible::CollectionAllowance` (r:0 w:1) + /// Proof: `Nonfungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn set_allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_060_000 picoseconds. 
- Weight::from_parts(2_150_000, 0) + // Minimum execution time: 6_840_000 picoseconds. + Weight::from_parts(7_160_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nonfungible CollectionAllowance (r:1 w:0) - /// Proof: Nonfungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Nonfungible::CollectionAllowance` (r:1 w:0) + /// Proof: `Nonfungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 1_630_000 picoseconds. - Weight::from_parts(1_730_000, 3576) + // Minimum execution time: 3_630_000 picoseconds. + Weight::from_parts(3_780_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Nonfungible TokenProperties (r:1 w:1) - /// Proof: Nonfungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Nonfungible::TokenProperties` (r:1 w:1) + /// Proof: `Nonfungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn repair_item() -> Weight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 1_700_000 picoseconds. - Weight::from_parts(1_780_000, 36269) + // Minimum execution time: 3_280_000 picoseconds. + Weight::from_parts(3_480_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index e58b965648..2806a2ff2b 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_refungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-05, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -50,13 +50,10 @@ pub trait WeightInfo { fn transfer_from_removing() -> Weight; fn transfer_from_creating_removing() -> Weight; fn burn_from() -> Weight; - fn set_token_property_permissions(b: u32, ) -> Weight; - fn set_token_properties(b: u32, ) -> Weight; fn load_token_properties() -> Weight; fn write_token_properties(b: u32, ) -> Weight; - fn delete_token_properties(b: u32, ) -> Weight; + fn set_token_property_permissions(b: u32, ) -> Weight; fn repartition_item() -> Weight; - fn token_owner() -> Weight; fn set_allowance_for_all() -> Weight; fn allowance_for_all() -> Weight; fn repair_item() -> Weight; @@ -65,445 +62,399 @@ pub trait WeightInfo { /// Weights for pallet_refungible using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 5_710_000 picoseconds. - Weight::from_parts(5_980_000, 3530) + // Minimum execution time: 19_400_000 picoseconds. 
+ Weight::from_parts(19_890_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:200) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:200) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_300_000 picoseconds. - Weight::from_parts(1_360_000, 3530) - // Standard Error: 2_783 - .saturating_add(Weight::from_parts(3_456_531, 0).saturating_mul(b.into())) + // Minimum execution time: 3_120_000 picoseconds. 
+ Weight::from_parts(3_310_000, 3530) + // Standard Error: 2_748 + .saturating_add(Weight::from_parts(11_489_631, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:200 w:200) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:200) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:200 w:200) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:200) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_290_000 picoseconds. - Weight::from_parts(1_370_000, 3481) - // Standard Error: 3_198 - .saturating_add(Weight::from_parts(4_435_305, 0).saturating_mul(b.into())) + // Minimum execution time: 3_180_000 picoseconds. 
+ Weight::from_parts(2_015_490, 3481) + // Standard Error: 6_052 + .saturating_add(Weight::from_parts(14_837_077, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:200 w:200) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:200 w:200) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_730_000 picoseconds. - Weight::from_parts(1_810_000, 3481) - // Standard Error: 1_923 - .saturating_add(Weight::from_parts(3_500_817, 0).saturating_mul(b.into())) + // Minimum execution time: 5_200_000 picoseconds. 
+ Weight::from_parts(25_301_631, 3481) + // Standard Error: 6_177 + .saturating_add(Weight::from_parts(11_197_931, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Refungible Balance (r:3 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:3 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn burn_item_partial() -> Weight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 14_010_000 picoseconds. - Weight::from_parts(16_300_000, 8682) + // Minimum execution time: 29_540_000 picoseconds. 
+ Weight::from_parts(30_190_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokensBurnt (r:1 w:1) - /// Proof: Refungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokensBurnt` (r:1 w:1) + /// Proof: `Refungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_item_fully() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 13_700_000 picoseconds. - Weight::from_parts(14_180_000, 3554) + // Minimum execution time: 30_650_000 picoseconds. + Weight::from_parts(31_370_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn transfer_normal() -> Weight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 8_990_000 picoseconds. - Weight::from_parts(9_400_000, 6118) + // Minimum execution time: 18_530_000 picoseconds. 
+ Weight::from_parts(19_010_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_creating() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 10_240_000 picoseconds. - Weight::from_parts(10_610_000, 6118) + // Minimum execution time: 24_240_000 picoseconds. + Weight::from_parts(24_760_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_removing() -> Weight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 12_040_000 picoseconds. - Weight::from_parts(12_390_000, 6118) + // Minimum execution time: 25_990_000 picoseconds. 
+ Weight::from_parts(26_650_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:2 w:2) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:2) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:2 w:2) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:2) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_creating_removing() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 11_940_000 picoseconds. - Weight::from_parts(12_240_000, 6118) + // Minimum execution time: 29_550_000 picoseconds. + Weight::from_parts(30_530_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Refungible Balance (r:1 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible Allowance (r:0 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:0) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Allowance` (r:0 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 5_150_000 picoseconds. - Weight::from_parts(5_440_000, 3554) + // Minimum execution time: 11_420_000 picoseconds. 
+ Weight::from_parts(11_810_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible Balance (r:1 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible Allowance (r:0 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:0) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Allowance` (r:0 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 5_170_000 picoseconds. - Weight::from_parts(5_400_000, 3554) + // Minimum execution time: 11_610_000 picoseconds. + Weight::from_parts(11_950_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn transfer_from_normal() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(13_600_000, 6118) + // Minimum execution time: 28_510_000 picoseconds. 
+ Weight::from_parts(29_180_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_creating() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 14_280_000 picoseconds. - Weight::from_parts(14_680_000, 6118) + // Minimum execution time: 34_370_000 picoseconds. 
+ Weight::from_parts(35_270_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_removing() -> Weight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 16_110_000 picoseconds. - Weight::from_parts(16_710_000, 6118) + // Minimum execution time: 36_490_000 picoseconds. 
+ Weight::from_parts(37_160_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:2 w:2) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:2) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:2 w:2) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:2) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_creating_removing() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 16_130_000 picoseconds. - Weight::from_parts(16_680_000, 6118) + // Minimum execution time: 40_080_000 picoseconds. 
+ Weight::from_parts(48_310_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokensBurnt (r:1 w:1) - /// Proof: Refungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokensBurnt` (r:1 w:1) + /// Proof: `Refungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 18_380_000 picoseconds. - Weight::from_parts(18_870_000, 3570) + // Minimum execution time: 41_100_000 picoseconds. + Weight::from_parts(42_060_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_property_permissions(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `20191` - // Minimum execution time: 580_000 picoseconds. 
- Weight::from_parts(660_000, 20191) - // Standard Error: 29_964 - .saturating_add(Weight::from_parts(6_251_766, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `502 + b * (261 ±0)` - // Estimated: `36269` - // Minimum execution time: 350_000 picoseconds. - Weight::from_parts(2_269_806, 36269) - // Standard Error: 7_751 - .saturating_add(Weight::from_parts(3_068_126, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: Refungible TokenProperties (r:1 w:0) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:1 w:0) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn load_token_properties() -> Weight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_010_000 picoseconds. - Weight::from_parts(1_080_000, 36269) + // Minimum execution time: 2_520_000 picoseconds. + Weight::from_parts(2_670_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(1_363_449, 0) - // Standard Error: 8_964 - .saturating_add(Weight::from_parts(2_665_759, 0).saturating_mul(b.into())) + // Minimum execution time: 490_000 picoseconds. 
+ Weight::from_parts(3_457_547, 0) + // Standard Error: 24_239 + .saturating_add(Weight::from_parts(19_382_722, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. - fn delete_token_properties(b: u32, ) -> Weight { + fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `561 + b * (33291 ±0)` - // Estimated: `36269` - // Minimum execution time: 320_000 picoseconds. - Weight::from_parts(370_000, 36269) - // Standard Error: 28_541 - .saturating_add(Weight::from_parts(9_863_065, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `314` + // Estimated: `20191` + // Minimum execution time: 1_500_000 picoseconds. + Weight::from_parts(1_590_000, 20191) + // Standard Error: 123_927 + .saturating_add(Weight::from_parts(27_355_093, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn repartition_item() -> Weight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_640_000, 3554) + // Minimum execution time: 14_340_000 picoseconds. + Weight::from_parts(14_590_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Refungible Balance (r:2 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - fn token_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `288` - // Estimated: `6118` - // Minimum execution time: 2_520_000 picoseconds. 
- Weight::from_parts(2_680_000, 6118) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } - /// Storage: Refungible CollectionAllowance (r:0 w:1) - /// Proof: Refungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Refungible::CollectionAllowance` (r:0 w:1) + /// Proof: `Refungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn set_allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_070_000 picoseconds. - Weight::from_parts(2_230_000, 0) + // Minimum execution time: 6_390_000 picoseconds. + Weight::from_parts(6_650_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Refungible CollectionAllowance (r:1 w:0) - /// Proof: Refungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Refungible::CollectionAllowance` (r:1 w:0) + /// Proof: `Refungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_270_000 picoseconds. - Weight::from_parts(1_420_000, 3576) + // Minimum execution time: 3_060_000 picoseconds. + Weight::from_parts(3_210_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:1 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn repair_item() -> Weight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_010_000 picoseconds. - Weight::from_parts(1_160_000, 36269) + // Minimum execution time: 2_480_000 picoseconds. 
+ Weight::from_parts(2_620_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -511,445 +462,399 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 5_710_000 picoseconds. - Weight::from_parts(5_980_000, 3530) + // Minimum execution time: 19_400_000 picoseconds. 
+ Weight::from_parts(19_890_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:200) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:200) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 1_300_000 picoseconds. - Weight::from_parts(1_360_000, 3530) - // Standard Error: 2_783 - .saturating_add(Weight::from_parts(3_456_531, 0).saturating_mul(b.into())) + // Minimum execution time: 3_120_000 picoseconds. 
+ Weight::from_parts(3_310_000, 3530) + // Standard Error: 2_748 + .saturating_add(Weight::from_parts(11_489_631, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:200 w:200) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:200) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:200 w:200) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:200) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_items(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_290_000 picoseconds. - Weight::from_parts(1_370_000, 3481) - // Standard Error: 3_198 - .saturating_add(Weight::from_parts(4_435_305, 0).saturating_mul(b.into())) + // Minimum execution time: 3_180_000 picoseconds. 
+ Weight::from_parts(2_015_490, 3481) + // Standard Error: 6_052 + .saturating_add(Weight::from_parts(14_837_077, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Refungible TokensMinted (r:1 w:1) - /// Proof: Refungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:200 w:200) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:0 w:200) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:0 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:200) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::TokensMinted` (r:1 w:1) + /// Proof: `Refungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:200 w:200) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:0 w:200) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:0 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:200) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex_multiple_owners(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 1_730_000 picoseconds. - Weight::from_parts(1_810_000, 3481) - // Standard Error: 1_923 - .saturating_add(Weight::from_parts(3_500_817, 0).saturating_mul(b.into())) + // Minimum execution time: 5_200_000 picoseconds. 
+ Weight::from_parts(25_301_631, 3481) + // Standard Error: 6_177 + .saturating_add(Weight::from_parts(11_197_931, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(b.into())) } - /// Storage: Refungible Balance (r:3 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:3 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn burn_item_partial() -> Weight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 14_010_000 picoseconds. - Weight::from_parts(16_300_000, 8682) + // Minimum execution time: 29_540_000 picoseconds. 
+ Weight::from_parts(30_190_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokensBurnt (r:1 w:1) - /// Proof: Refungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokensBurnt` (r:1 w:1) + /// Proof: `Refungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_item_fully() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 13_700_000 picoseconds. - Weight::from_parts(14_180_000, 3554) + // Minimum execution time: 30_650_000 picoseconds. + Weight::from_parts(31_370_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn transfer_normal() -> Weight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 8_990_000 picoseconds. - Weight::from_parts(9_400_000, 6118) + // Minimum execution time: 18_530_000 picoseconds. 
+ Weight::from_parts(19_010_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_creating() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 10_240_000 picoseconds. - Weight::from_parts(10_610_000, 6118) + // Minimum execution time: 24_240_000 picoseconds. + Weight::from_parts(24_760_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_removing() -> Weight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 12_040_000 picoseconds. - Weight::from_parts(12_390_000, 6118) + // Minimum execution time: 25_990_000 picoseconds. 
+ Weight::from_parts(26_650_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:2 w:2) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:2) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:2 w:2) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:2) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_creating_removing() -> Weight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 11_940_000 picoseconds. - Weight::from_parts(12_240_000, 6118) + // Minimum execution time: 29_550_000 picoseconds. + Weight::from_parts(30_530_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Refungible Balance (r:1 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible Allowance (r:0 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:0) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Allowance` (r:0 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 5_150_000 picoseconds. - Weight::from_parts(5_440_000, 3554) + // Minimum execution time: 11_420_000 picoseconds. 
+ Weight::from_parts(11_810_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible Balance (r:1 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible Allowance (r:0 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) + /// Storage: `Refungible::Balance` (r:1 w:0) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Allowance` (r:0 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 5_170_000 picoseconds. - Weight::from_parts(5_400_000, 3554) + // Minimum execution time: 11_610_000 picoseconds. + Weight::from_parts(11_950_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn transfer_from_normal() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(13_600_000, 6118) + // Minimum execution time: 28_510_000 picoseconds. 
+ Weight::from_parts(29_180_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_creating() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 14_280_000 picoseconds. - Weight::from_parts(14_680_000, 6118) + // Minimum execution time: 34_370_000 picoseconds. 
+ Weight::from_parts(35_270_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_removing() -> Weight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 16_110_000 picoseconds. - Weight::from_parts(16_710_000, 6118) + // Minimum execution time: 36_490_000 picoseconds. 
+ Weight::from_parts(37_160_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:2 w:2) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:2 w:2) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:2) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:2 w:2) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:2 w:2) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:0) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:2) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn transfer_from_creating_removing() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 16_130_000 picoseconds. - Weight::from_parts(16_680_000, 6118) + // Minimum execution time: 40_080_000 picoseconds. 
+ Weight::from_parts(48_310_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Refungible Allowance (r:1 w:1) - /// Proof: Refungible Allowance (max_values: None, max_size: Some(105), added: 2580, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible AccountBalance (r:1 w:1) - /// Proof: Refungible AccountBalance (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Refungible TokensBurnt (r:1 w:1) - /// Proof: Refungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Refungible Owned (r:0 w:1) - /// Proof: Refungible Owned (max_values: None, max_size: Some(74), added: 2549, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::Allowance` (r:1 w:1) + /// Proof: `Refungible::Allowance` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::AccountBalance` (r:1 w:1) + /// Proof: `Refungible::AccountBalance` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokensBurnt` (r:1 w:1) + /// Proof: `Refungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Owned` (r:0 w:1) + /// Proof: `Refungible::Owned` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 18_380_000 picoseconds. - Weight::from_parts(18_870_000, 3570) + // Minimum execution time: 41_100_000 picoseconds. + Weight::from_parts(42_060_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_property_permissions(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `20191` - // Minimum execution time: 580_000 picoseconds. 
- Weight::from_parts(660_000, 20191) - // Standard Error: 29_964 - .saturating_add(Weight::from_parts(6_251_766, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// The range of component `b` is `[0, 64]`. - fn set_token_properties(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `502 + b * (261 ±0)` - // Estimated: `36269` - // Minimum execution time: 350_000 picoseconds. - Weight::from_parts(2_269_806, 36269) - // Standard Error: 7_751 - .saturating_add(Weight::from_parts(3_068_126, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: Refungible TokenProperties (r:1 w:0) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:1 w:0) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn load_token_properties() -> Weight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_010_000 picoseconds. - Weight::from_parts(1_080_000, 36269) + // Minimum execution time: 2_520_000 picoseconds. + Weight::from_parts(2_670_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Refungible TokenProperties (r:0 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:0 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. fn write_token_properties(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 70_000 picoseconds. - Weight::from_parts(1_363_449, 0) - // Standard Error: 8_964 - .saturating_add(Weight::from_parts(2_665_759, 0).saturating_mul(b.into())) + // Minimum execution time: 490_000 picoseconds. 
+ Weight::from_parts(3_457_547, 0) + // Standard Error: 24_239 + .saturating_add(Weight::from_parts(19_382_722, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionPropertyPermissions (r:1 w:0) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Refungible TotalSupply (r:1 w:0) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 64]`. - fn delete_token_properties(b: u32, ) -> Weight { + fn set_token_property_permissions(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `561 + b * (33291 ±0)` - // Estimated: `36269` - // Minimum execution time: 320_000 picoseconds. - Weight::from_parts(370_000, 36269) - // Standard Error: 28_541 - .saturating_add(Weight::from_parts(9_863_065, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `314` + // Estimated: `20191` + // Minimum execution time: 1_500_000 picoseconds. + Weight::from_parts(1_590_000, 20191) + // Standard Error: 123_927 + .saturating_add(Weight::from_parts(27_355_093, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible TotalSupply (r:1 w:1) - /// Proof: Refungible TotalSupply (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Refungible Balance (r:1 w:1) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Refungible::TotalSupply` (r:1 w:1) + /// Proof: `Refungible::TotalSupply` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Refungible::Balance` (r:1 w:1) + /// Proof: `Refungible::Balance` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn repartition_item() -> Weight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_640_000, 3554) + // Minimum execution time: 14_340_000 picoseconds. + Weight::from_parts(14_590_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Refungible Balance (r:2 w:0) - /// Proof: Refungible Balance (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - fn token_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `288` - // Estimated: `6118` - // Minimum execution time: 2_520_000 picoseconds. 
- Weight::from_parts(2_680_000, 6118) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } - /// Storage: Refungible CollectionAllowance (r:0 w:1) - /// Proof: Refungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Refungible::CollectionAllowance` (r:0 w:1) + /// Proof: `Refungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn set_allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_070_000 picoseconds. - Weight::from_parts(2_230_000, 0) + // Minimum execution time: 6_390_000 picoseconds. + Weight::from_parts(6_650_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Refungible CollectionAllowance (r:1 w:0) - /// Proof: Refungible CollectionAllowance (max_values: None, max_size: Some(111), added: 2586, mode: MaxEncodedLen) + /// Storage: `Refungible::CollectionAllowance` (r:1 w:0) + /// Proof: `Refungible::CollectionAllowance` (`max_values`: None, `max_size`: Some(111), added: 2586, mode: `MaxEncodedLen`) fn allowance_for_all() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 1_270_000 picoseconds. - Weight::from_parts(1_420_000, 3576) + // Minimum execution time: 3_060_000 picoseconds. + Weight::from_parts(3_210_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Refungible TokenProperties (r:1 w:1) - /// Proof: Refungible TokenProperties (max_values: None, max_size: Some(32804), added: 35279, mode: MaxEncodedLen) + /// Storage: `Refungible::TokenProperties` (r:1 w:1) + /// Proof: `Refungible::TokenProperties` (`max_values`: None, `max_size`: Some(32804), added: 35279, mode: `MaxEncodedLen`) fn repair_item() -> Weight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 1_010_000 picoseconds. - Weight::from_parts(1_160_000, 36269) + // Minimum execution time: 2_480_000 picoseconds. 
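The regenerated numbers above all follow the same composition: a benchmarked base (ref_time, proof_size) pair plus a per-access database cost. A minimal sketch of that shape, assuming frame_support's stock RocksDbWeight constants (the function name is illustrative):

use frame_support::weights::{constants::RocksDbWeight, Weight};

// Base parts come from the benchmark run; DB costs are added per storage
// read/write, and saturating_add keeps the arithmetic panic-free.
fn example_weight() -> Weight {
    Weight::from_parts(2_620_000, 36_269)
        .saturating_add(RocksDbWeight::get().reads(1_u64))
        .saturating_add(RocksDbWeight::get().writes(1_u64))
}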
+ Weight::from_parts(2_620_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } From ae913530fe5301277f97e9ba13781391acac3044 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 13:12:47 +0200 Subject: [PATCH 127/143] fix: remove unneeded PhantomData --- pallets/common/src/lib.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index c84c73ffed..f5a35751f9 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -875,7 +875,7 @@ pub mod pallet { enum LazyValueState<'a, T> { Pending(Box T + 'a>), - InProgress(PhantomData>), + InProgress, Computed(T), } @@ -930,13 +930,9 @@ impl<'a, T> LazyValue<'a, T> { return; } - match sp_std::mem::replace(&mut self.state, InProgress(PhantomData)) { + match sp_std::mem::replace(&mut self.state, InProgress) { Pending(f) => self.state = Computed(f()), - _ => { - // Computed is ruled out by the above condition - // InProgress is ruled out by not implementing Sync and absence of recursion - unreachable!() - } + _ => panic!("recursion isn't supported"), } } } From 442d14a2f734e716b07202762d1edc9afe96f114 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 13:16:04 +0200 Subject: [PATCH 128/143] fix: mint_with_props_weight --- pallets/nonfungible/src/common.rs | 8 ++++++-- pallets/refungible/src/common.rs | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index 3e3c94cdce..b5ee050515 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -110,12 +110,16 @@ impl CommonWeightInfo for CommonWeights { } } +/// Weight of minting tokens with properties +/// * `create_no_data_weight` -- the weight of minting without properties +/// * `token_properties_nums` -- number of properties of each token +#[inline] pub(crate) fn mint_with_props_weight( create_no_data_weight: Weight, - tokens: impl Iterator + Clone, + token_properties_nums: impl Iterator + Clone, ) -> Weight { create_no_data_weight.saturating_add(write_token_properties_total_weight::( - tokens, + token_properties_nums, >::write_token_properties, )) } diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 1adbe59c5f..0c55e2eb99 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -130,12 +130,16 @@ impl CommonWeightInfo for CommonWeights { } } +/// Weight of minting tokens with properties +/// * `create_no_data_weight` -- the weight of minting without properties +/// * `token_properties_nums` -- number of properties of each token +#[inline] pub(crate) fn mint_with_props_weight( create_no_data_weight: Weight, - tokens: impl Iterator + Clone, + token_properties_nums: impl Iterator + Clone, ) -> Weight { create_no_data_weight.saturating_add(write_token_properties_total_weight::( - tokens, + token_properties_nums, >::write_token_properties, )) } From 1367a5fc7250ca991d074c2406892b3fb65465ec Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 13:17:20 +0200 Subject: [PATCH 129/143] fix: use saturating_add --- pallets/refungible/src/common.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 0c55e2eb99..94987e88e1 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -83,7 +83,7 @@ impl 
CommonWeightInfo for CommonWeights { fn set_token_properties(amount: u32) -> Weight { write_token_properties_total_weight::([amount].into_iter(), |amount| { >::load_token_properties() - + >::write_token_properties(amount) + .saturating_add(>::write_token_properties(amount)) }) } From 4819755773f42235d22bf281cf0269c0169c2dcf Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 14:22:53 +0200 Subject: [PATCH 130/143] fix: unit tests --- runtime/tests/src/lib.rs | 1 + runtime/tests/src/tests.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/runtime/tests/src/lib.rs b/runtime/tests/src/lib.rs index 6a5b461d75..45fd20f2db 100644 --- a/runtime/tests/src/lib.rs +++ b/runtime/tests/src/lib.rs @@ -292,6 +292,7 @@ impl pallet_unique::Config for Test { type WeightInfo = (); type CommonWeightInfo = CommonWeights; type RefungibleExtensionsWeightInfo = CommonWeights; + type StructureWeightInfo = pallet_structure::weights::SubstrateWeight; } // Build genesis storage according to the mock runtime. diff --git a/runtime/tests/src/tests.rs b/runtime/tests/src/tests.rs index 9f10c0eb65..e912646d7b 100644 --- a/runtime/tests/src/tests.rs +++ b/runtime/tests/src/tests.rs @@ -2624,10 +2624,10 @@ mod check_token_permissions { use super::*; - fn test bool>( + fn test( i: usize, test_case: &pallet_common::tests::TestCase, - check_token_existence: &mut LazyValue, + check_token_existence: &mut LazyValue, ) { let collection_admin = test_case.collection_admin; let mut is_collection_admin = LazyValue::new(|| test_case.is_collection_admin); @@ -2635,7 +2635,7 @@ mod check_token_permissions { let mut is_token_owner = LazyValue::new(|| Ok(test_case.is_token_owner)); let is_no_permission = test_case.no_permission; - let result = pallet_common::tests::check_token_permissions::( + let result = pallet_common::tests::check_token_permissions::( collection_admin, token_owner, &mut is_collection_admin, From 478ee33488215a1ac4cf53f9c12895352709963a Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Fri, 13 Oct 2023 14:38:39 +0200 Subject: [PATCH 131/143] fix: clippy warns --- pallets/common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index f5a35751f9..ae3910da28 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -2687,7 +2687,7 @@ where ) -> DispatchResult { let check_token_exist = || true; let check_token_owner = || Ok(true); - let get_token_properties = || TokenProperties::new(); + let get_token_properties = TokenProperties::new; self.internal_write_token_properties( token_id, From 750f6e16db9b5226050605d727a9c85edfa2ccdd Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Fri, 13 Oct 2023 17:07:14 +0200 Subject: [PATCH 132/143] fix: lookahead collator build --- Cargo.lock | 4 ++++ Cargo.toml | 1 + node/cli/Cargo.toml | 5 ++++- node/cli/src/service.rs | 30 +++++++++++++++++++++++++++--- runtime/common/runtime_apis.rs | 11 +++++++++++ runtime/opal/Cargo.toml | 3 +++ 6 files changed, 50 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8c1591024..145f688950 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6450,6 +6450,7 @@ dependencies = [ "cumulus-pallet-parachain-system", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-timestamp", "cumulus-primitives-utility", @@ -8109,12 +8110,14 @@ dependencies = [ "pallet-evm-coder-substrate", "pallet-nonfungible", "pallet-refungible", + 
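The PhantomData removal above leans on a small state machine around mem::replace: the pending closure is taken out, the slot is parked as InProgress while it runs, and a re-entrant call would observe InProgress and hit the explicit panic that replaced the old unreachable!(). A stripped-down sketch of the pattern (type and method names are simplified, not the pallet's exact LazyValue API):

enum Lazy<'a, T> {
    Pending(Box<dyn FnOnce() -> T + 'a>),
    InProgress,
    Computed(T),
}

impl<'a, T> Lazy<'a, T> {
    fn value(&mut self) -> &T {
        if matches!(self, Lazy::Pending(_)) {
            // Park the state while the closure runs; recursion would land in
            // the panic branch below instead of re-running the closure.
            match core::mem::replace(self, Lazy::InProgress) {
                Lazy::Pending(f) => *self = Lazy::Computed(f()),
                _ => panic!("recursion isn't supported"),
            }
        }
        match self {
            Lazy::Computed(v) => v,
            _ => panic!("recursion isn't supported"),
        }
    }
}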
"pallet-structure", "parity-scale-codec", "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-std", + "up-common", "up-data-structs", ] @@ -14732,6 +14735,7 @@ dependencies = [ "cumulus-client-consensus-proposer", "cumulus-client-network", "cumulus-client-service", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-inprocess-interface", diff --git a/Cargo.toml b/Cargo.toml index 14a4294e82..748f2ada65 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,6 +100,7 @@ cumulus-pallet-dmp-queue = { default-features = false, git = "https://github.com cumulus-pallet-parachain-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } cumulus-pallet-xcm = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } cumulus-pallet-xcmp-queue = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-aura = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } cumulus-primitives-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } cumulus-primitives-parachain-inherent = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } cumulus-primitives-timestamp = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 64de1b786c..32acce376d 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -36,6 +36,7 @@ cumulus-client-consensus-common = { workspace = true } cumulus-client-consensus-proposer = { workspace = true } cumulus-client-network = { workspace = true } cumulus-client-service = { workspace = true } +cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { features = ["std"], workspace = true } cumulus-relay-chain-inprocess-interface = { workspace = true } @@ -113,7 +114,9 @@ gov-test-timings = [ 'quartz-runtime?/gov-test-timings', 'unique-runtime?/gov-test-timings', ] -lookahead = [] +lookahead = [ + 'opal-runtime/lookahead' +] pov-estimate = [ 'opal-runtime/pov-estimate', 'quartz-runtime?/pov-estimate', diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index ce16688038..f16e6e799c 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -221,6 +221,14 @@ ez_bounds!( { } ); +#[cfg(not(feature = "lookahead"))] +ez_bounds!( + pub trait LookaheadApiDep {} +); +#[cfg(feature = "lookahead")] +ez_bounds!( + pub trait LookaheadApiDep: cumulus_primitives_aura::AuraUnincludedSegmentApi {} +); /// Starts a `ServiceBuilder` for a full service. 
/// @@ -358,6 +366,7 @@ where + Sync + 'static, RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + RuntimeApi::RuntimeApi: LookaheadApiDep, Runtime: RuntimeInstance, ExecutorDispatch: NativeExecutionDispatch + 'static, { @@ -687,6 +696,8 @@ pub struct StartConsensusParameters<'a> { announce_block: Arc>) + Send + Sync>, } +// Clones ignored for optional lookahead collator +#[allow(clippy::redundant_clone)] pub fn start_consensus( client: Arc>, transaction_pool: Arc< @@ -701,6 +712,7 @@ where + Sync + 'static, RuntimeApi::RuntimeApi: RuntimeApiDep + 'static, + RuntimeApi::RuntimeApi: LookaheadApiDep, Runtime: RuntimeInstance, { let StartConsensusParameters { @@ -735,12 +747,12 @@ where client.clone(), ); - let block_import = ParachainBlockImport::new(client.clone(), backend); + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); let params = BuildAuraConsensusParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client, + para_client: client.clone(), #[cfg(feature = "lookahead")] para_backend: backend, para_id, @@ -751,10 +763,19 @@ where proposer, collator_service, // With async-baking, we allowed to be both slower (longer authoring) and faster (multiple para blocks per relay block) + #[cfg(not(feature = "lookahead"))] authoring_duration: Duration::from_millis(500), + #[cfg(feature = "lookahead")] + authoring_duration: Duration::from_millis(1500), overseer_handle, #[cfg(feature = "lookahead")] - code_hash_provider: || {}, + code_hash_provider: move |block_hash| { + client + .code_at(block_hash) + .ok() + .map(cumulus_primitives_core::relay_chain::ValidationCode) + .map(|c| c.hash()) + }, collator_key, relay_chain_slot_duration, }; @@ -762,7 +783,10 @@ where task_manager.spawn_essential_handle().spawn( "aura", None, + #[cfg(not(feature = "lookahead"))] run_aura::<_, AuraAuthorityPair, _, _, _, _, _, _, _>(params), + #[cfg(feature = "lookahead")] + run_aura::<_, AuraAuthorityPair, _, _, _, _, _, _, _, _, _>(params), ); Ok(()) } diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 04910ae2dc..3c42220c1e 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -679,6 +679,17 @@ macro_rules! impl_common_runtime_apis { } } + #[cfg(feature = "lookahead")] + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + _included_hash: ::Hash, + _slot: cumulus_primitives_aura::Slot, + ) -> bool { + // FIXME: Limit velocity + true + } + } + /// Should never be used, yet still required because of https://github.com/paritytech/polkadot-sdk/issues/27 /// Not allowed to panic, because rpc may be called using native runtime, thus causing thread panic. 
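The LookaheadApiDep bound toggled above uses the usual empty-alias-trait trick: the trait declares nothing of its own, and a blanket impl makes every type that satisfies the underlying runtime API bound satisfy the alias, so the extra requirement can be switched by the cargo feature without touching call sites. A hedged sketch of that pattern (ExtraApi and ApiDep are illustrative names, not the actual ez_bounds! expansion):

// Stand-in for the real requirement, e.g. AuraUnincludedSegmentApi when
// the `lookahead` feature is enabled.
trait ExtraApi {}

// Alias trait: with the feature on it carries the supertrait bound,
// with it off it would simply be `trait ApiDep {}`.
trait ApiDep: ExtraApi {}
impl<T: ExtraApi> ApiDep for T {}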
impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index 5e9e63723b..ee11854589 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -69,6 +69,7 @@ std = [ 'cumulus-pallet-parachain-system/std', 'cumulus-pallet-xcm/std', 'cumulus-pallet-xcmp-queue/std', + 'cumulus-primitives-aura/std', 'cumulus-primitives-core/std', 'cumulus-primitives-utility/std', 'frame-executive/std', @@ -230,6 +231,7 @@ governance = [] preimage = [] refungible = [] session-test-timings = [] +lookahead = [] ################################################################################ # local dependencies @@ -240,6 +242,7 @@ cumulus-pallet-dmp-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-timestamp = { workspace = true } cumulus-primitives-utility = { workspace = true } From 6f9bef9c71c98940630c8526f647fdd55734d412 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 16 Oct 2023 11:59:59 +0200 Subject: [PATCH 133/143] fix: max create multiple items --- primitives/data-structs/src/lib.rs | 2 +- tests/src/performance.seq.test.ts | 49 ++++-------------------------- 2 files changed, 7 insertions(+), 44 deletions(-) diff --git a/primitives/data-structs/src/lib.rs b/primitives/data-structs/src/lib.rs index 74aeafeace..95ae8eb8eb 100644 --- a/primitives/data-structs/src/lib.rs +++ b/primitives/data-structs/src/lib.rs @@ -135,7 +135,7 @@ pub const MAX_TOKEN_PROPERTIES_SIZE: u32 = 32768; /// How much items can be created per single /// create_many call. -pub const MAX_ITEMS_PER_BATCH: u32 = 200; +pub const MAX_ITEMS_PER_BATCH: u32 = 120; /// Used for limit bounded types of token custom data. 
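Lowering MAX_ITEMS_PER_BATCH to 120 tightens the batched create-item payloads, which use the constant as a BoundedVec capacity, so an oversized batch is rejected when the argument is built rather than partway through the extrinsic. A hedged sketch of how such a bound behaves (Batch and make_batch are illustrative, not the exact up-data-structs types):

use frame_support::{traits::ConstU32, BoundedVec};

type Batch<T> = BoundedVec<T, ConstU32<120>>;

fn make_batch(items: Vec<u32>) -> Result<Batch<u32>, &'static str> {
    // Construction fails as soon as items.len() exceeds the 120-item bound.
    items.try_into().map_err(|_| "more than 120 items in one batch")
}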
pub type CustomDataLimit = ConstU32; diff --git a/tests/src/performance.seq.test.ts b/tests/src/performance.seq.test.ts index e049e217d9..22059b7b9c 100644 --- a/tests/src/performance.seq.test.ts +++ b/tests/src/performance.seq.test.ts @@ -22,7 +22,7 @@ import {UniqueHelper} from './util/playgrounds/unique'; describe('Performace tests', () => { let alice: IKeyringPair; - const MAX_TOKENS_TO_MINT = 200; + const MAX_TOKENS_TO_MINT = 120; before(async () => { await usingPlaygrounds(async (helper, privateKey) => { @@ -42,8 +42,6 @@ describe('Performace tests', () => { ], }); - - const results = []; const step = 1_000; const sizeOfKey = sizeOfEncodedStr(propertyKey); let currentSize = step; @@ -56,52 +54,17 @@ describe('Performace tests', () => { startCount = await tryMintExplicit(helper, alice, MAX_TOKENS_TO_MINT, collection.collectionId, {Substrate: alice.address}); minterFunc = tryMintExplicit; } - results.push({propertySize: 0, tokens: startCount}); + + expect(startCount).to.be.equal(MAX_TOKENS_TO_MINT); while(currentSize <= 32_000) { const property = {key: propertyKey, value: 'A'.repeat(currentSize - sizeOfKey - sizeOfInt(currentSize))}; - const maxTokens = Math.ceil(results.map(x => x.tokens).reduce((a, b) => a + b) / results.length); - const tokens = await minterFunc(helper, alice, maxTokens, collection.collectionId, {Substrate: alice.address}, property); - results.push({propertySize: sizeOfProperty(property), tokens}); + const tokens = await minterFunc(helper, alice, MAX_TOKENS_TO_MINT, collection.collectionId, {Substrate: alice.address}, property); + expect(tokens).to.be.equal(MAX_TOKENS_TO_MINT); + currentSize += step; await helper.wait.newBlocks(2); } - - expect(results).to.be.deep.equal([ - {propertySize: 0, tokens: 200}, - {propertySize: 1000, tokens: 149}, - {propertySize: 2000, tokens: 149}, - {propertySize: 3000, tokens: 149}, - {propertySize: 4000, tokens: 149}, - {propertySize: 5000, tokens: 149}, - {propertySize: 6000, tokens: 149}, - {propertySize: 7000, tokens: 149}, - {propertySize: 8000, tokens: 149}, - {propertySize: 9000, tokens: 149}, - {propertySize: 10000, tokens: 149}, - {propertySize: 11000, tokens: 149}, - {propertySize: 12000, tokens: 149}, - {propertySize: 13000, tokens: 149}, - {propertySize: 14000, tokens: 149}, - {propertySize: 15000, tokens: 149}, - {propertySize: 16000, tokens: 149}, - {propertySize: 17000, tokens: 149}, - {propertySize: 18000, tokens: 149}, - {propertySize: 19000, tokens: 149}, - {propertySize: 20000, tokens: 149}, - {propertySize: 21000, tokens: 149}, - {propertySize: 22000, tokens: 149}, - {propertySize: 23000, tokens: 149}, - {propertySize: 24000, tokens: 149}, - {propertySize: 25000, tokens: 149}, - {propertySize: 26000, tokens: 149}, - {propertySize: 27000, tokens: 145}, - {propertySize: 28000, tokens: 140}, - {propertySize: 29000, tokens: 135}, - {propertySize: 30000, tokens: 130}, - {propertySize: 31000, tokens: 126}, - {propertySize: 32000, tokens: 122}, - ]); }); }); From d56113dcf9f7b2b9eed2fb284a38984eb0a4bf5f Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 16 Oct 2023 12:01:28 +0200 Subject: [PATCH 134/143] fix: remove unused fn in perf test --- tests/src/performance.seq.test.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/src/performance.seq.test.ts b/tests/src/performance.seq.test.ts index 22059b7b9c..66e153b76a 100644 --- a/tests/src/performance.seq.test.ts +++ b/tests/src/performance.seq.test.ts @@ -105,10 +105,6 @@ const tryMintExplicit = async (helper: UniqueHelper, signer: IKeyringPair, token 
return tokensCount; }; -function sizeOfProperty(prop: IProperty) { - return sizeOfEncodedStr(prop.key) + sizeOfEncodedStr(prop.value!); -} - function sizeOfInt(i: number) { if(i < 0 || i > 0xffffffff) throw new Error('out of range'); if(i < 0b11_1111) { From 8c199c3011beddecfd65757fa5cf9fd429a9925b Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Mon, 16 Oct 2023 13:13:48 +0200 Subject: [PATCH 135/143] fix(inflation): BlockNumberProvider --- pallets/inflation/src/lib.rs | 6 +++--- runtime/common/config/pallets/mod.rs | 25 +++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/pallets/inflation/src/lib.rs b/pallets/inflation/src/lib.rs index e49af7e0f4..6b9bf3a3e4 100644 --- a/pallets/inflation/src/lib.rs +++ b/pallets/inflation/src/lib.rs @@ -70,8 +70,8 @@ pub mod pallet { + Mutate; type TreasuryAccountId: Get; - // The block number provider - type BlockNumberProvider: BlockNumberProvider>; + // The block number provider, which should be callable from `on_initialize` hook. + type OnInitializeBlockNumberProvider: BlockNumberProvider>; /// Number of blocks that pass between treasury balance updates due to inflation #[pallet::constant] @@ -118,7 +118,7 @@ pub mod pallet { }; let block_interval: u32 = T::InflationBlockInterval::get().try_into().unwrap_or(0); - let current_relay_block = T::BlockNumberProvider::current_block_number(); + let current_relay_block = T::OnInitializeBlockNumberProvider::current_block_number(); let next_inflation: BlockNumberFor = >::get(); add_weight(1, 0, Weight::from_parts(5_000_000, 0)); diff --git a/runtime/common/config/pallets/mod.rs b/runtime/common/config/pallets/mod.rs index ee0694582c..5df782afdb 100644 --- a/runtime/common/config/pallets/mod.rs +++ b/runtime/common/config/pallets/mod.rs @@ -21,7 +21,7 @@ use frame_support::{ traits::{ConstU32, ConstU64, Currency}, }; use sp_arithmetic::Perbill; -use sp_runtime::traits::AccountIdConversion; +use sp_runtime::traits::{BlockNumberProvider, AccountIdConversion}; use up_common::{ constants::*, types::{AccountId, Balance, BlockNumber}, @@ -105,12 +105,33 @@ parameter_types! 
{ pub const InflationBlockInterval: BlockNumber = 100; // every time per how many blocks inflation is applied } +/// Pallet-inflation needs block number in on_initialize, where there is no `validation_data` exists yet +pub struct OnInitializeBlockNumberProvider; +impl BlockNumberProvider for OnInitializeBlockNumberProvider { + type BlockNumber = BlockNumber; + + fn current_block_number() -> Self::BlockNumber { + use parity_scale_codec::Decode; + use hex_literal::hex; + use sp_io::storage; + // TODO: Replace with the following code after https://github.com/paritytech/polkadot-sdk/commit/3ea497b5a0fdda252f9c5a3c257cfaf8685f02fd lands + // >::last_relay_block_number() + + // ParachainSystem.LastRelayChainBlockNumber + let Some(encoded) = storage::get(&hex!("45323df7cc47150b3930e2666b0aa313a2bca190d36bd834cc73a38fc213ecbd")) else { + // First parachain block + return Default::default() + }; + BlockNumber::decode(&mut encoded.as_ref()).expect("typeof(RelayBlockNumber) == typeof(BlockNumber) == u32; qed") + } +} + /// Used for the pallet inflation impl pallet_inflation::Config for Runtime { type Currency = Balances; type TreasuryAccountId = TreasuryAccountId; type InflationBlockInterval = InflationBlockInterval; - type BlockNumberProvider = RelayChainBlockNumberProvider; + type OnInitializeBlockNumberProvider = OnInitializeBlockNumberProvider; } impl pallet_unique::Config for Runtime { From 31bae1fc2671a2a40d3984fba83b17aea9643d29 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 16 Oct 2023 14:37:17 +0200 Subject: [PATCH 136/143] fix: separate depositor from sender in transfer_internal (#1013) * fix: transfer-from * fix: remove Option from nester type * fix: check_nesting comment * fix: rename nester to depositor, add docs --- pallets/balances-adapter/src/common.rs | 2 +- pallets/common/src/lib.rs | 2 +- pallets/fungible/src/common.rs | 2 +- pallets/fungible/src/lib.rs | 22 ++++++++++++++++--- pallets/nonfungible/src/common.rs | 2 +- pallets/nonfungible/src/lib.rs | 24 +++++++++++++++++---- pallets/refungible/src/common.rs | 2 +- pallets/refungible/src/lib.rs | 30 +++++++++++++++++++++++--- pallets/structure/src/lib.rs | 4 ++-- 9 files changed, 73 insertions(+), 17 deletions(-) diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index b03a95124b..7e9bb38708 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -221,7 +221,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { fn check_nesting( &self, - _sender: ::CrossAccountId, + _sender: &::CrossAccountId, _from: (up_data_structs::CollectionId, TokenId), _under: TokenId, _budget: &dyn up_data_structs::budget::Budget, diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index ae3910da28..d2f31a58c6 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -2194,7 +2194,7 @@ pub trait CommonCollectionOperations { /// * `budget` - The maximum budget that can be spent on the check. 
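The hard-coded key read by OnInitializeBlockNumberProvider above is the plain FRAME storage key for ParachainSystem's LastRelayChainBlockNumber value: twox128 of the pallet prefix concatenated with twox128 of the storage item name. A small verification sketch (the helper name is illustrative and not part of the runtime):

use sp_core::hashing::twox_128;

fn last_relay_chain_block_number_key() -> Vec<u8> {
    // twox128("ParachainSystem") ++ twox128("LastRelayChainBlockNumber")
    let mut key = twox_128(b"ParachainSystem").to_vec();
    key.extend_from_slice(&twox_128(b"LastRelayChainBlockNumber"));
    key
}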
fn check_nesting( &self, - sender: T::CrossAccountId, + sender: &T::CrossAccountId, from: (CollectionId, TokenId), under: TokenId, budget: &dyn Budget, diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index c998d697d2..0512bc886b 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -335,7 +335,7 @@ impl CommonCollectionOperations for FungibleHandle { fn check_nesting( &self, - _sender: ::CrossAccountId, + _sender: &::CrossAccountId, _from: (CollectionId, TokenId), _under: TokenId, _nesting_budget: &dyn Budget, diff --git a/pallets/fungible/src/lib.rs b/pallets/fungible/src/lib.rs index 9c8291a8d2..b2ab9bbcc0 100644 --- a/pallets/fungible/src/lib.rs +++ b/pallets/fungible/src/lib.rs @@ -383,6 +383,21 @@ impl Pallet { to: &T::CrossAccountId, amount: u128, nesting_budget: &dyn Budget, + ) -> DispatchResultWithPostInfo { + let depositor = from; + Self::transfer_internal(collection, depositor, from, to, amount, nesting_budget) + } + + /// Transfers tokens from the `from` account to the `to` account. + /// The `depositor` is the account who deposits the tokens. + /// For instance, the nesting rules will be checked against the `depositor`'s permissions. + fn transfer_internal( + collection: &FungibleHandle, + depositor: &T::CrossAccountId, + from: &T::CrossAccountId, + to: &T::CrossAccountId, + amount: u128, + nesting_budget: &dyn Budget, ) -> DispatchResultWithPostInfo { ensure!( collection.limits.transfers_enabled(), @@ -416,7 +431,7 @@ impl Pallet { // from != to && amount != 0 >::nest_if_sent_to_token( - from.clone(), + depositor, to, collection.id, TokenId::default(), @@ -473,7 +488,7 @@ impl Pallet { for (to, _) in data.iter() { >::check_nesting( - sender.clone(), + sender, to, collection.id, TokenId::default(), @@ -730,7 +745,8 @@ impl Pallet { // ========= - let mut result = Self::transfer(collection, from, to, amount, nesting_budget); + let mut result = + Self::transfer_internal(collection, spender, from, to, amount, nesting_budget); add_weight_to_post_info(&mut result, >::check_allowed_raw()); result?; diff --git a/pallets/nonfungible/src/common.rs b/pallets/nonfungible/src/common.rs index b5ee050515..a6eb621cd3 100644 --- a/pallets/nonfungible/src/common.rs +++ b/pallets/nonfungible/src/common.rs @@ -407,7 +407,7 @@ impl CommonCollectionOperations for NonfungibleHandle { fn check_nesting( &self, - sender: T::CrossAccountId, + sender: &T::CrossAccountId, from: (CollectionId, TokenId), under: TokenId, nesting_budget: &dyn Budget, diff --git a/pallets/nonfungible/src/lib.rs b/pallets/nonfungible/src/lib.rs index c309f294ac..69d353ce0d 100644 --- a/pallets/nonfungible/src/lib.rs +++ b/pallets/nonfungible/src/lib.rs @@ -717,6 +717,21 @@ impl Pallet { to: &T::CrossAccountId, token: TokenId, nesting_budget: &dyn Budget, + ) -> DispatchResultWithPostInfo { + let depositor = from; + Self::transfer_internal(collection, depositor, from, to, token, nesting_budget) + } + + /// Transfers an NFT from the `from` account to the `to` account. + /// The `depositor` is the account who deposits the NFT. + /// For instance, the nesting rules will be checked against the `depositor`'s permissions. 
+ pub fn transfer_internal( + collection: &NonfungibleHandle, + depositor: &T::CrossAccountId, + from: &T::CrossAccountId, + to: &T::CrossAccountId, + token: TokenId, + nesting_budget: &dyn Budget, ) -> DispatchResultWithPostInfo { ensure!( collection.limits.transfers_enabled(), @@ -754,7 +769,7 @@ impl Pallet { }; >::nest_if_sent_to_token( - from.clone(), + depositor, to, collection.id, token, @@ -860,7 +875,7 @@ impl Pallet { let token = TokenId(first_token + i as u32 + 1); >::check_nesting( - sender.clone(), + sender, &data.owner, collection.id, token, @@ -1154,7 +1169,8 @@ impl Pallet { // ========= // Allowance is reset in [`transfer`] - let mut result = Self::transfer(collection, from, to, token, nesting_budget); + let mut result = + Self::transfer_internal(collection, spender, from, to, token, nesting_budget); add_weight_to_post_info(&mut result, >::check_allowed_raw()); result } @@ -1183,7 +1199,7 @@ impl Pallet { /// pub fn check_nesting( handle: &NonfungibleHandle, - sender: T::CrossAccountId, + sender: &T::CrossAccountId, from: (CollectionId, TokenId), under: TokenId, nesting_budget: &dyn Budget, diff --git a/pallets/refungible/src/common.rs b/pallets/refungible/src/common.rs index 94987e88e1..afd26b234c 100644 --- a/pallets/refungible/src/common.rs +++ b/pallets/refungible/src/common.rs @@ -416,7 +416,7 @@ impl CommonCollectionOperations for RefungibleHandle { fn check_nesting( &self, - _sender: ::CrossAccountId, + _sender: &::CrossAccountId, _from: (CollectionId, TokenId), _under: TokenId, _nesting_budget: &dyn Budget, diff --git a/pallets/refungible/src/lib.rs b/pallets/refungible/src/lib.rs index 8017c6f6f7..5ce0837479 100644 --- a/pallets/refungible/src/lib.rs +++ b/pallets/refungible/src/lib.rs @@ -614,6 +614,30 @@ impl Pallet { token: TokenId, amount: u128, nesting_budget: &dyn Budget, + ) -> DispatchResult { + let depositor = from; + Self::transfer_internal( + collection, + depositor, + from, + to, + token, + amount, + nesting_budget, + ) + } + + /// Transfers RFT tokens from the `from` account to the `to` account. + /// The `depositor` is the account who deposits the tokens. + /// For instance, the nesting rules will be checked against the `depositor`'s permissions. + pub fn transfer_internal( + collection: &RefungibleHandle, + depositor: &T::CrossAccountId, + from: &T::CrossAccountId, + to: &T::CrossAccountId, + token: TokenId, + amount: u128, + nesting_budget: &dyn Budget, ) -> DispatchResult { ensure!( collection.limits.transfers_enabled(), @@ -683,7 +707,7 @@ impl Pallet { // from != to && amount != 0 >::nest_if_sent_to_token( - from.clone(), + depositor, to, collection.id, token, @@ -847,7 +871,7 @@ impl Pallet { let token_id = TokenId(first_token_id + i as u32 + 1); for (to, _) in token.users.iter() { >::check_nesting( - sender.clone(), + sender, to, collection.id, token_id, @@ -1146,7 +1170,7 @@ impl Pallet { // ========= - Self::transfer(collection, from, to, token, amount, nesting_budget)?; + Self::transfer_internal(collection, spender, from, to, token, amount, nesting_budget)?; if let Some(allowance) = allowance { Self::set_allowance_unchecked(collection, from, spender, token, allowance); } diff --git a/pallets/structure/src/lib.rs b/pallets/structure/src/lib.rs index 127983607d..261d608524 100644 --- a/pallets/structure/src/lib.rs +++ b/pallets/structure/src/lib.rs @@ -300,7 +300,7 @@ impl Pallet { /// /// - `nesting_budget`: Limit for searching parents in depth. 
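The depositor/sender split above matters most for transfer_from: the approved spender is the account actually depositing the token into a nested position, so the nesting check must run against the spender rather than the token owner. A self-contained toy sketch of the call shape (Account and the function bodies are illustrative, not the pallet API):

struct Account(&'static str);

fn transfer_internal(depositor: &Account, from: &Account, to: &Account) {
    // In the pallets above, check_nesting / nest_if_sent_to_token receive `depositor`.
    println!("{} -> {}, nesting checked against {}", from.0, to.0, depositor.0);
}

fn transfer(from: &Account, to: &Account) {
    transfer_internal(from, from, to); // plain transfer: depositor == sender
}

fn transfer_from(spender: &Account, from: &Account, to: &Account) {
    transfer_internal(spender, from, to); // approved transfer: depositor == spender
}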
pub fn check_nesting( - from: T::CrossAccountId, + from: &T::CrossAccountId, under: &T::CrossAccountId, collection_id: CollectionId, token_id: TokenId, @@ -317,7 +317,7 @@ impl Pallet { /// /// - `nesting_budget`: Limit for searching parents in depth. pub fn nest_if_sent_to_token( - from: T::CrossAccountId, + from: &T::CrossAccountId, under: &T::CrossAccountId, collection_id: CollectionId, token_id: TokenId, From 0a49946aa7b27af974aecbb544c319f99d078c7b Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 17 Oct 2023 00:47:48 +0200 Subject: [PATCH 137/143] fix: forbid non-1 tokenid in native fungibles (#1015) * fix: native fungibles collection * refactor: move FungibleItemsHaveNoId to pallet-common --- pallets/balances-adapter/src/common.rs | 54 +++++++++++++++----------- pallets/balances-adapter/src/erc.rs | 16 ++------ pallets/balances-adapter/src/lib.rs | 18 +++------ pallets/common/src/lib.rs | 3 ++ pallets/fungible/src/common.rs | 14 +++---- pallets/fungible/src/lib.rs | 2 - 6 files changed, 52 insertions(+), 55 deletions(-) diff --git a/pallets/balances-adapter/src/common.rs b/pallets/balances-adapter/src/common.rs index 7e9bb38708..f6dbac1be3 100644 --- a/pallets/balances-adapter/src/common.rs +++ b/pallets/balances-adapter/src/common.rs @@ -1,9 +1,9 @@ use alloc::{vec, vec::Vec}; use core::marker::PhantomData; -use frame_support::{fail, weights::Weight}; +use frame_support::{ensure, fail, weights::Weight}; use pallet_balances::{weights::SubstrateWeight as BalancesWeight, WeightInfo}; -use pallet_common::{CommonCollectionOperations, CommonWeightInfo}; +use pallet_common::{CommonCollectionOperations, CommonWeightInfo, Error as CommonError}; use up_data_structs::TokenId; use crate::{Config, NativeFungibleHandle, Pallet}; @@ -77,7 +77,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _data: up_data_structs::CreateItemData, _nesting_budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn create_multiple_items( @@ -87,7 +87,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _data: Vec, _nesting_budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn create_multiple_items_ex( @@ -96,7 +96,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _data: up_data_structs::CreateItemExData<::CrossAccountId>, _nesting_budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn burn_item( @@ -105,7 +105,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _token: TokenId, _amount: u128, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn set_collection_properties( @@ -113,7 +113,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _sender: ::CrossAccountId, _properties: Vec, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn delete_collection_properties( @@ -121,7 +121,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _sender: &::CrossAccountId, _property_keys: Vec, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + 
fail!(>::UnsupportedOperation); } fn set_token_properties( @@ -131,7 +131,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _properties: Vec, _budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn delete_token_properties( @@ -141,7 +141,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _property_keys: Vec, _budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn get_token_properties_raw( @@ -161,18 +161,23 @@ impl CommonCollectionOperations for NativeFungibleHandle { _sender: &::CrossAccountId, _property_permissions: Vec, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn transfer( &self, sender: ::CrossAccountId, to: ::CrossAccountId, - _token: TokenId, + token: TokenId, amount: u128, - budget: &dyn up_data_structs::budget::Budget, + _budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - >::transfer(self, &sender, &to, amount, budget) + ensure!( + token == TokenId::default(), + >::FungibleItemsHaveNoId + ); + + >::transfer(&sender, &to, amount) } fn approve( @@ -182,7 +187,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _token: TokenId, _amount: u128, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn approve_from( @@ -193,7 +198,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _token: TokenId, _amount: u128, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn transfer_from( @@ -201,11 +206,16 @@ impl CommonCollectionOperations for NativeFungibleHandle { sender: ::CrossAccountId, from: ::CrossAccountId, to: ::CrossAccountId, - _token: TokenId, + token: TokenId, amount: u128, budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - >::transfer_from(self, &sender, &from, &to, amount, budget) + ensure!( + token == TokenId::default(), + >::FungibleItemsHaveNoId + ); + + >::transfer_from(&sender, &from, &to, amount, budget) } fn burn_from( @@ -216,7 +226,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _amount: u128, _budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn check_nesting( @@ -226,7 +236,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _under: TokenId, _budget: &dyn up_data_structs::budget::Budget, ) -> frame_support::sp_runtime::DispatchResult { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn nest(&self, _under: TokenId, _to_nest: (up_data_structs::CollectionId, TokenId)) {} @@ -332,7 +342,7 @@ impl CommonCollectionOperations for NativeFungibleHandle { _operator: ::CrossAccountId, _approve: bool, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } fn allowance_for_all( @@ -347,6 +357,6 @@ impl CommonCollectionOperations for NativeFungibleHandle { &self, _token: TokenId, ) -> frame_support::pallet_prelude::DispatchResultWithPostInfo { - 
fail!(>::UnsupportedOperation); + fail!(>::UnsupportedOperation); } } diff --git a/pallets/balances-adapter/src/erc.rs b/pallets/balances-adapter/src/erc.rs index 90b12327f9..857b49037e 100644 --- a/pallets/balances-adapter/src/erc.rs +++ b/pallets/balances-adapter/src/erc.rs @@ -58,12 +58,8 @@ impl NativeFungibleHandle { let caller = T::CrossAccountId::from_eth(caller); let to = T::CrossAccountId::from_eth(to); let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder() - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, amount, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + >::transfer(&caller, &to, amount).map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } @@ -83,7 +79,7 @@ impl NativeFungibleHandle { .recorder() .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, amount, &budget) + >::transfer_from(&caller, &from, &to, amount, &budget) .map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } @@ -106,12 +102,8 @@ where let caller = T::CrossAccountId::from_eth(caller); let to = to.into_sub_cross_account::()?; let amount = amount.try_into().map_err(|_| "amount overflow")?; - let budget = self - .recorder() - .weight_calls_budget(>::find_parent()); - >::transfer(self, &caller, &to, amount, &budget) - .map_err(|e| dispatch_to_evm::(e.error))?; + >::transfer(&caller, &to, amount).map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) } @@ -137,7 +129,7 @@ where .recorder() .weight_calls_budget(>::find_parent()); - >::transfer_from(self, &caller, &from, &to, amount, &budget) + >::transfer_from(&caller, &from, &to, amount, &budget) .map_err(|e| dispatch_to_evm::(e.error))?; Ok(true) diff --git a/pallets/balances-adapter/src/lib.rs b/pallets/balances-adapter/src/lib.rs index 91997c0b79..aeb792fe92 100644 --- a/pallets/balances-adapter/src/lib.rs +++ b/pallets/balances-adapter/src/lib.rs @@ -155,20 +155,15 @@ pub mod pallet { Ok(Self::balance_of(from)) } - /// Transfers the specified amount of tokens. Will check that - /// the transfer is allowed for the token. + /// Transfers the specified amount of tokens. /// - /// - `collection`: Collection that contains the token. /// - `from`: Owner of tokens to transfer. /// - `to`: Recepient of transfered tokens. /// - `amount`: Amount of tokens to transfer. - /// - `nesting_budget`: Limit for searching parents in-depth to check ownership. pub fn transfer( - _collection: &NativeFungibleHandle, from: &T::CrossAccountId, to: &T::CrossAccountId, amount: u128, - _nesting_budget: &dyn Budget, ) -> DispatchResultWithPostInfo { >::ensure_correct_receiver(to)?; @@ -185,19 +180,18 @@ pub mod pallet { }) } - /// Transfer NFT token from one account to another. + /// Transfer tokens from one account to another. /// - /// Same as the [`Self::transfer`] but spender doesn't needs to be the owner of the token. - /// The owner should set allowance for the spender to transfer token. + /// Same as the [`Self::transfer`] but the spender doesn't needs to be the direct owner of the token. + /// The spender must be allowed to transfer token. + /// If the tokens are nested in an NFT and the spender owns the NFT, the allowance is considered to be set. /// - /// - `collection`: Collection that contains the token. /// - `spender`: Account that spend the money. /// - `from`: Owner of tokens to transfer. /// - `to`: Recepient of transfered tokens. /// - `amount`: Amount of tokens to transfer. /// - `nesting_budget`: Limit for searching parents in-depth to check ownership. 
pub fn transfer_from( - collection: &NativeFungibleHandle, spender: &T::CrossAccountId, from: &T::CrossAccountId, to: &T::CrossAccountId, @@ -208,7 +202,7 @@ pub mod pallet { if allowance < amount { return Err(>::ApprovedValueTooLow.into()); } - Self::transfer(collection, from, to, amount, nesting_budget) + Self::transfer(from, to, amount) } } } diff --git a/pallets/common/src/lib.rs b/pallets/common/src/lib.rs index d2f31a58c6..50c4420332 100644 --- a/pallets/common/src/lib.rs +++ b/pallets/common/src/lib.rs @@ -783,6 +783,9 @@ pub mod pallet { /// The user is not an administrator. UserIsNotCollectionAdmin, + + /// Fungible tokens hold no ID, and the default value of TokenId for a fungible collection is 0. + FungibleItemsHaveNoId, } /// Storage of the count of created collections. Essentially contains the last collection ID. diff --git a/pallets/fungible/src/common.rs b/pallets/fungible/src/common.rs index 0512bc886b..dd8a76f35e 100644 --- a/pallets/fungible/src/common.rs +++ b/pallets/fungible/src/common.rs @@ -19,7 +19,7 @@ use core::marker::PhantomData; use frame_support::{dispatch::DispatchResultWithPostInfo, ensure, fail, weights::Weight}; use pallet_common::{ weights::WeightInfo as _, with_weight, CommonCollectionOperations, CommonWeightInfo, - RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, + Error as CommonError, RefungibleExtensions, SelfWeightOf as PalletCommonWeightOf, }; use sp_runtime::{ArithmeticError, DispatchError}; use sp_std::{vec, vec::Vec}; @@ -169,7 +169,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); with_weight( @@ -188,7 +188,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); >::transfer(self, &from, &to, amount, nesting_budget) @@ -203,7 +203,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); with_weight( @@ -222,7 +222,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); with_weight( @@ -242,7 +242,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); >::transfer_from(self, &sender, &from, &to, amount, nesting_budget) @@ -258,7 +258,7 @@ impl CommonCollectionOperations for FungibleHandle { ) -> DispatchResultWithPostInfo { ensure!( token == TokenId::default(), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); with_weight( diff --git a/pallets/fungible/src/lib.rs b/pallets/fungible/src/lib.rs index b2ab9bbcc0..1aadc416fc 100644 --- a/pallets/fungible/src/lib.rs +++ b/pallets/fungible/src/lib.rs @@ -123,8 +123,6 @@ pub mod pallet { pub enum Error { /// Not Fungible item data used to mint in Fungible collection. NotFungibleDataUsedToMintFungibleCollectionToken, - /// Fungible tokens hold no ID, and the default value of TokenId for Fungible collection is 0. - FungibleItemsHaveNoId, /// Tried to set data for fungible item. FungibleItemsDontHaveData, /// Fungible token does not support nesting. 
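
The common thread in the hunks above: fungible items carry no individual ID, so every common-operation entry point that still receives a TokenId now rejects anything but the default id with CommonError::FungibleItemsHaveNoId before touching balances. Below is a minimal standalone sketch of that guard, assuming simplified stand-ins for TokenId, the error enum and the ledger; these are not the pallet types, only the shape of the check matches.

// Minimal standalone sketch of the "fungible items have no ID" guard.
// TokenId, Error and FungibleLedger are simplified stand-ins, not the pallet types.
use std::collections::HashMap;

#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct TokenId(u32);

#[derive(Debug, PartialEq, Eq)]
enum Error {
    // Fungible tokens hold no ID; only TokenId::default() (i.e. 0) is accepted.
    FungibleItemsHaveNoId,
    InsufficientBalance,
}

struct FungibleLedger {
    balances: HashMap<&'static str, u128>,
}

impl FungibleLedger {
    fn transfer(
        &mut self,
        from: &'static str,
        to: &'static str,
        token: TokenId,
        amount: u128,
    ) -> Result<(), Error> {
        // The guard: reject any non-default token id before touching balances.
        if token != TokenId::default() {
            return Err(Error::FungibleItemsHaveNoId);
        }
        let from_balance = self.balances.get(from).copied().unwrap_or(0);
        if from_balance < amount {
            return Err(Error::InsufficientBalance);
        }
        self.balances.insert(from, from_balance - amount);
        *self.balances.entry(to).or_insert(0) += amount;
        Ok(())
    }
}

fn main() {
    let mut ledger = FungibleLedger {
        balances: HashMap::from([("alice", 100u128)]),
    };
    // The default id passes the guard and moves the balance.
    assert_eq!(ledger.transfer("alice", "bob", TokenId::default(), 40), Ok(()));
    // Any other id is rejected up front.
    assert_eq!(
        ledger.transfer("alice", "bob", TokenId(1), 10),
        Err(Error::FungibleItemsHaveNoId)
    );
    println!("alice: {:?}, bob: {:?}", ledger.balances.get("alice"), ledger.balances.get("bob"));
}

Doing the id check at the dispatch boundary is also what lets the patch shrink Pallet::transfer itself: once the id is validated by the caller, the inner transfer no longer needs the collection handle or the nesting budget in its signature.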
From 700b0c8abd12db6eb27ea5167cdf57799c9c4b66 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Mon, 16 Oct 2023 14:38:31 +0200 Subject: [PATCH 138/143] fix: enable throttling --- node/cli/src/command.rs | 2 ++ pallets/inflation/src/lib.rs | 4 +++- primitives/common/src/constants.rs | 3 +++ runtime/common/config/pallets/mod.rs | 7 ++++--- runtime/common/config/parachain.rs | 20 ++++++++++++++++++++ runtime/common/runtime_apis.rs | 7 +++---- 6 files changed, 35 insertions(+), 8 deletions(-) diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 945014c606..6ed625d191 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -396,6 +396,8 @@ pub fn run() -> Result<()> { } } #[cfg(feature = "try-runtime")] + // embedded try-runtime cli will be removed soon. + #[allow(deprecated)] Some(Subcommand::TryRuntime(cmd)) => { use std::{future::Future, pin::Pin}; diff --git a/pallets/inflation/src/lib.rs b/pallets/inflation/src/lib.rs index 6b9bf3a3e4..2ecbd88dff 100644 --- a/pallets/inflation/src/lib.rs +++ b/pallets/inflation/src/lib.rs @@ -71,7 +71,9 @@ pub mod pallet { type TreasuryAccountId: Get; // The block number provider, which should be callable from `on_initialize` hook. - type OnInitializeBlockNumberProvider: BlockNumberProvider>; + type OnInitializeBlockNumberProvider: BlockNumberProvider< + BlockNumber = BlockNumberFor, + >; /// Number of blocks that pass between treasury balance updates due to inflation #[pallet::constant] diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index ced66a2f6c..df7ae1db60 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -23,7 +23,10 @@ use sp_runtime::Perbill; use crate::types::{Balance, BlockNumber}; +#[cfg(not(feature = "lookahead"))] pub const MILLISECS_PER_BLOCK: u64 = 12000; +#[cfg(feature = "lookahead")] +pub const MILLISECS_PER_BLOCK: u64 = 3000; pub const MILLISECS_PER_RELAY_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; diff --git a/runtime/common/config/pallets/mod.rs b/runtime/common/config/pallets/mod.rs index 5df782afdb..0da0638340 100644 --- a/runtime/common/config/pallets/mod.rs +++ b/runtime/common/config/pallets/mod.rs @@ -21,7 +21,7 @@ use frame_support::{ traits::{ConstU32, ConstU64, Currency}, }; use sp_arithmetic::Perbill; -use sp_runtime::traits::{BlockNumberProvider, AccountIdConversion}; +use sp_runtime::traits::{AccountIdConversion, BlockNumberProvider}; use up_common::{ constants::*, types::{AccountId, Balance, BlockNumber}, @@ -111,8 +111,8 @@ impl BlockNumberProvider for OnInitializeBlockNumberProvider { type BlockNumber = BlockNumber; fn current_block_number() -> Self::BlockNumber { - use parity_scale_codec::Decode; use hex_literal::hex; + use parity_scale_codec::Decode; use sp_io::storage; // TODO: Replace with the following code after https://github.com/paritytech/polkadot-sdk/commit/3ea497b5a0fdda252f9c5a3c257cfaf8685f02fd lands // >::last_relay_block_number() @@ -122,7 +122,8 @@ impl BlockNumberProvider for OnInitializeBlockNumberProvider { // First parachain block return Default::default() }; - BlockNumber::decode(&mut encoded.as_ref()).expect("typeof(RelayBlockNumber) == typeof(BlockNumber) == u32; qed") + BlockNumber::decode(&mut encoded.as_ref()) + .expect("typeof(RelayBlockNumber) == typeof(BlockNumber) == u32; qed") } } diff --git a/runtime/common/config/parachain.rs b/runtime/common/config/parachain.rs index ad42e39958..d56da711ca 100644 --- a/runtime/common/config/parachain.rs +++ 
b/runtime/common/config/parachain.rs @@ -38,9 +38,29 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type ReservedXcmpWeight = ReservedXcmpWeight; type XcmpMessageHandler = XcmpQueue; + #[cfg(not(feature = "lookahead"))] type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; + #[cfg(feature = "lookahead")] + type CheckAssociatedRelayNumber = + cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; } impl parachain_info::Config for Runtime {} impl cumulus_pallet_aura_ext::Config for Runtime {} + +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +#[cfg(feature = "lookahead")] +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +#[cfg(feature = "lookahead")] +const BLOCK_PROCESSING_VELOCITY: u32 = 2; +#[cfg(feature = "lookahead")] +pub type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + { MILLISECS_PER_RELAY_BLOCK as u32 }, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; diff --git a/runtime/common/runtime_apis.rs b/runtime/common/runtime_apis.rs index 3c42220c1e..f15a5c6ae6 100644 --- a/runtime/common/runtime_apis.rs +++ b/runtime/common/runtime_apis.rs @@ -682,11 +682,10 @@ macro_rules! impl_common_runtime_apis { #[cfg(feature = "lookahead")] impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { fn can_build_upon( - _included_hash: ::Hash, - _slot: cumulus_primitives_aura::Slot, + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, ) -> bool { - // FIXME: Limit velocity - true + $crate::config::parachain::ConsensusHook::can_build_upon(included_hash, slot) } } From 1f267425b9a38982f7c2f0850b91a1c03c8fb692 Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Tue, 17 Oct 2023 11:28:35 +0200 Subject: [PATCH 139/143] test: fix build --- node/cli/Cargo.toml | 4 +--- pallets/inflation/src/tests.rs | 2 +- pallets/unique/Cargo.toml | 2 +- runtime/opal/Cargo.toml | 4 ++-- runtime/tests/src/tests.rs | 2 +- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 32acce376d..7875a8a5cf 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -114,9 +114,7 @@ gov-test-timings = [ 'quartz-runtime?/gov-test-timings', 'unique-runtime?/gov-test-timings', ] -lookahead = [ - 'opal-runtime/lookahead' -] +lookahead = ['opal-runtime/lookahead'] pov-estimate = [ 'opal-runtime/pov-estimate', 'quartz-runtime?/pov-estimate', diff --git a/pallets/inflation/src/tests.rs b/pallets/inflation/src/tests.rs index 221c964496..b3cde26614 100644 --- a/pallets/inflation/src/tests.rs +++ b/pallets/inflation/src/tests.rs @@ -122,7 +122,7 @@ impl pallet_inflation::Config for Test { type Currency = Balances; type TreasuryAccountId = TreasuryAccountId; type InflationBlockInterval = InflationBlockInterval; - type BlockNumberProvider = MockBlockNumberProvider; + type OnInitializeBlockNumberProvider = MockBlockNumberProvider; } pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/pallets/unique/Cargo.toml b/pallets/unique/Cargo.toml index d6e9265f85..cb3f02f9a2 100644 --- a/pallets/unique/Cargo.toml +++ b/pallets/unique/Cargo.toml @@ -28,12 +28,12 @@ std = [ 'pallet-evm-coder-substrate/std', 'pallet-evm/std', 'pallet-nonfungible/std', + 'pallet-structure/std', 'parity-scale-codec/std', 
'sp-runtime/std', 'sp-std/std', 'up-common/std', 'up-data-structs/std', - 'pallet-structure/std', ] stubgen = ["evm-coder/stubgen", "pallet-common/stubgen"] try-runtime = ["frame-support/try-runtime"] diff --git a/runtime/opal/Cargo.toml b/runtime/opal/Cargo.toml index ee11854589..70ae0d3b54 100644 --- a/runtime/opal/Cargo.toml +++ b/runtime/opal/Cargo.toml @@ -137,7 +137,7 @@ std = [ 'sp-runtime/std', 'sp-session/std', 'sp-std/std', - 'sp-storage/std', + 'sp-storage/std', 'sp-transaction-pool/std', 'sp-version/std', 'staging-xcm-builder/std', @@ -228,10 +228,10 @@ collator-selection = [] foreign-assets = [] gov-test-timings = [] governance = [] +lookahead = [] preimage = [] refungible = [] session-test-timings = [] -lookahead = [] ################################################################################ # local dependencies diff --git a/runtime/tests/src/tests.rs b/runtime/tests/src/tests.rs index e912646d7b..fe76886665 100644 --- a/runtime/tests/src/tests.rs +++ b/runtime/tests/src/tests.rs @@ -1161,7 +1161,7 @@ fn burn_fungible_item_with_token_id() { // Try to burn item using Token ID assert_noop!( Unique::burn_item(origin1, CollectionId(1), TokenId(1), 5).map_err(|e| e.error), - >::FungibleItemsHaveNoId + >::FungibleItemsHaveNoId ); }); } From 2f9af76e0de599fd5dbd4d870812700e82e0fd63 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Tue, 17 Oct 2023 11:55:03 +0200 Subject: [PATCH 140/143] fix: ci intergration tests profile --- .docker/Dockerfile-unique | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.docker/Dockerfile-unique b/.docker/Dockerfile-unique index 2779413575..4853de2514 100644 --- a/.docker/Dockerfile-unique +++ b/.docker/Dockerfile-unique @@ -48,8 +48,8 @@ RUN --mount=type=cache,target=/cargo-home/registry \ cd unique-chain && \ echo "Using runtime features '$RUNTIME_FEATURES'" && \ CARGO_INCREMENTAL=0 cargo build --profile integration-tests --features="$RUNTIME_FEATURES" --locked && \ - mv ./target/release/unique-collator /unique_parachain/unique-chain/ && \ - cd target/release/wbuild && find . -name "*.wasm" -exec sh -c 'mkdir -p "../../../wasm/$(dirname {})"; cp {} "../../../wasm/{}"' \; + mv ./target/integration-tests/unique-collator /unique_parachain/unique-chain/ && \ + cd target/integration-tests/wbuild && find . 
-name "*.wasm" -exec sh -c 'mkdir -p "../../../wasm/$(dirname {})"; cp {} "../../../wasm/{}"' \; # ===== BIN ====== From cd9af939796170a748fc0e40848ba1e9b1d4f885 Mon Sep 17 00:00:00 2001 From: Igor Kozyrev Date: Tue, 17 Oct 2023 12:06:46 +0200 Subject: [PATCH 141/143] build: run benchmarks --- Makefile | 2 +- pallets/app-promotion/src/weights.rs | 102 +-- pallets/collator-selection/src/weights.rs | 488 +++++++------- pallets/common/src/weights.rs | 34 +- pallets/configuration/src/weights.rs | 120 ++-- pallets/evm-migration/src/weights.rs | 136 ++-- pallets/foreign-assets/src/weights.rs | 136 ++-- pallets/fungible/src/weights.rs | 216 +++--- pallets/identity/src/weights.rs | 778 +++++++++++----------- pallets/maintenance/src/weights.rs | 40 +- pallets/nonfungible/src/weights.rs | 154 ++--- pallets/refungible/src/weights.rs | 234 +++---- pallets/structure/src/weights.rs | 32 +- pallets/unique/src/weights.rs | 360 +++++----- primitives/common/src/constants.rs | 4 +- runtime/common/weights/xcm.rs | 240 +++---- 16 files changed, 1538 insertions(+), 1538 deletions(-) diff --git a/Makefile b/Makefile index dbd6b0fa01..c386687228 100644 --- a/Makefile +++ b/Makefile @@ -117,7 +117,7 @@ $(eval $(call _bench,collator-selection)) $(eval $(call _bench,identity)) $(eval $(call _bench,app-promotion)) $(eval $(call _bench,maintenance)) -$(eval $(call _bench,xcm,,./runtime/common/weights/xcm.rs,"--template=.maintain/external-weights/template.hbs")) +$(eval $(call _bench,xcm,,./runtime/common/weights/xcm.rs,"--template=.maintain/external-weight-template.hbs")) .PHONY: bench bench: bench-app-promotion bench-common bench-evm-migration bench-unique bench-structure bench-fungible bench-refungible bench-nonfungible bench-configuration bench-foreign-assets bench-maintenance bench-xcm bench-collator-selection bench-identity diff --git a/pallets/app-promotion/src/weights.rs b/pallets/app-promotion/src/weights.rs index fc1a4ebba4..fbcaf1c5e1 100644 --- a/pallets/app-promotion/src/weights.rs +++ b/pallets/app-promotion/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_app_promotion //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-12, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `ubuntu-11`, CPU: `QEMU Virtual CPU version 2.5+` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -63,10 +63,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 6_031_000 picoseconds. - Weight::from_parts(6_880_848, 3622) - // Standard Error: 18_753 - .saturating_add(Weight::from_parts(22_907_186, 0).saturating_mul(b.into())) + // Minimum execution time: 5_767_000 picoseconds. 
+ Weight::from_parts(6_702_740, 3622) + // Standard Error: 23_437 + .saturating_add(Weight::from_parts(21_208_495, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -78,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_565_000 picoseconds. - Weight::from_parts(7_795_000, 0) + // Minimum execution time: 7_836_000 picoseconds. + Weight::from_parts(8_115_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `AppPromotion::Admin` (r:1 w:0) @@ -105,10 +105,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 146_577_000 picoseconds. - Weight::from_parts(147_970_000, 3593) - // Standard Error: 59_065 - .saturating_add(Weight::from_parts(115_527_092, 0).saturating_mul(b.into())) + // Minimum execution time: 136_584_000 picoseconds. + Weight::from_parts(3_372_327, 3593) + // Standard Error: 21_362 + .saturating_add(Weight::from_parts(103_567_863, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 46_889_000 picoseconds. - Weight::from_parts(47_549_000, 4764) + // Minimum execution time: 43_464_000 picoseconds. + Weight::from_parts(44_032_000, 4764) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -154,8 +154,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 63_069_000 picoseconds. - Weight::from_parts(64_522_000, 29095) + // Minimum execution time: 58_179_000 picoseconds. + Weight::from_parts(58_981_000, 29095) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -173,8 +173,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 84_649_000 picoseconds. - Weight::from_parts(86_173_000, 29095) + // Minimum execution time: 74_937_000 picoseconds. + Weight::from_parts(76_167_000, 29095) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -186,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 24_396_000 picoseconds. - Weight::from_parts(24_917_000, 4325) + // Minimum execution time: 23_306_000 picoseconds. + Weight::from_parts(23_680_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,8 +199,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 22_412_000 picoseconds. - Weight::from_parts(23_033_000, 4325) + // Minimum execution time: 21_298_000 picoseconds. 
+ Weight::from_parts(21_651_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -212,8 +212,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 21_621_000 picoseconds. - Weight::from_parts(22_041_000, 1517) + // Minimum execution time: 20_356_000 picoseconds. + Weight::from_parts(20_764_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -225,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(19_616_000, 3527) + // Minimum execution time: 18_207_000 picoseconds. + Weight::from_parts(18_565_000, 3527) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -249,10 +249,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `222 + b * (285 ±0)` // Estimated: `3622 + b * (3774 ±0)` - // Minimum execution time: 6_031_000 picoseconds. - Weight::from_parts(6_880_848, 3622) - // Standard Error: 18_753 - .saturating_add(Weight::from_parts(22_907_186, 0).saturating_mul(b.into())) + // Minimum execution time: 5_767_000 picoseconds. + Weight::from_parts(6_702_740, 3622) + // Standard Error: 23_437 + .saturating_add(Weight::from_parts(21_208_495, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -264,8 +264,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_565_000 picoseconds. - Weight::from_parts(7_795_000, 0) + // Minimum execution time: 7_836_000 picoseconds. + Weight::from_parts(8_115_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `AppPromotion::Admin` (r:1 w:0) @@ -291,10 +291,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564 + b * (641 ±0)` // Estimated: `3593 + b * (25550 ±0)` - // Minimum execution time: 146_577_000 picoseconds. - Weight::from_parts(147_970_000, 3593) - // Standard Error: 59_065 - .saturating_add(Weight::from_parts(115_527_092, 0).saturating_mul(b.into())) + // Minimum execution time: 136_584_000 picoseconds. + Weight::from_parts(3_372_327, 3593) + // Standard Error: 21_362 + .saturating_add(Weight::from_parts(103_567_863, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((13_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -321,8 +321,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `389` // Estimated: `4764` - // Minimum execution time: 46_889_000 picoseconds. - Weight::from_parts(47_549_000, 4764) + // Minimum execution time: 43_464_000 picoseconds. + Weight::from_parts(44_032_000, 4764) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -340,8 +340,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 63_069_000 picoseconds. - Weight::from_parts(64_522_000, 29095) + // Minimum execution time: 58_179_000 picoseconds. 
+ Weight::from_parts(58_981_000, 29095) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -359,8 +359,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `29095` - // Minimum execution time: 84_649_000 picoseconds. - Weight::from_parts(86_173_000, 29095) + // Minimum execution time: 74_937_000 picoseconds. + Weight::from_parts(76_167_000, 29095) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -372,8 +372,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1060` // Estimated: `4325` - // Minimum execution time: 24_396_000 picoseconds. - Weight::from_parts(24_917_000, 4325) + // Minimum execution time: 23_306_000 picoseconds. + Weight::from_parts(23_680_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -385,8 +385,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1092` // Estimated: `4325` - // Minimum execution time: 22_412_000 picoseconds. - Weight::from_parts(23_033_000, 4325) + // Minimum execution time: 21_298_000 picoseconds. + Weight::from_parts(21_651_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -398,8 +398,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `198` // Estimated: `1517` - // Minimum execution time: 21_621_000 picoseconds. - Weight::from_parts(22_041_000, 1517) + // Minimum execution time: 20_356_000 picoseconds. + Weight::from_parts(20_764_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -411,8 +411,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `396` // Estimated: `3527` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(19_616_000, 3527) + // Minimum execution time: 18_207_000 picoseconds. + Weight::from_parts(18_565_000, 3527) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/collator-selection/src/weights.rs b/pallets/collator-selection/src/weights.rs index c06f1f39a8..43b3aa2a2b 100644 --- a/pallets/collator-selection/src/weights.rs +++ b/pallets/collator-selection/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_collator_selection //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/collator-selection/src/weights.rs @@ -47,373 +47,373 @@ pub trait WeightInfo { /// Weights for pallet_collator_selection using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Session NextKeys (r:1 w:0) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: CollatorSelection Invulnerables (r:1 w:1) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection Candidates (r:1 w:0) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// The range of component `b` is `[1, 7]`. + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// The range of component `b` is `[2, 8]`. fn add_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + b * (45 ±0)` - // Estimated: `3873 + b * (45 ±0)` - // Minimum execution time: 10_975_000 picoseconds. - Weight::from_parts(11_362_608, 3873) - // Standard Error: 411 - .saturating_add(Weight::from_parts(152_014, 0).saturating_mul(b.into())) + // Measured: `358 + b * (45 ±0)` + // Estimated: `3829 + b * (45 ±0)` + // Minimum execution time: 18_241_000 picoseconds. + Weight::from_parts(18_396_907, 3829) + // Standard Error: 1_022 + .saturating_add(Weight::from_parts(184_843, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) } - /// Storage: CollatorSelection Invulnerables (r:1 w:1) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// The range of component `b` is `[1, 7]`. + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 8]`. fn remove_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 6_369_000 picoseconds. - Weight::from_parts(6_604_933, 1806) - // Standard Error: 424 - .saturating_add(Weight::from_parts(145_929, 0).saturating_mul(b.into())) + // Minimum execution time: 11_945_000 picoseconds. 
+ Weight::from_parts(12_355_271, 1806) + // Standard Error: 1_116 + .saturating_add(Weight::from_parts(121_483, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:0) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 9]`. + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::CollatorSelectionLicenseBondOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionLicenseBondOverride` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 10]`. fn get_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + c * (46 ±0)` - // Estimated: `4131 + c * (47 ±0)` - // Minimum execution time: 23_857_000 picoseconds. - Weight::from_parts(25_984_655, 4131) - // Standard Error: 4_364 - .saturating_add(Weight::from_parts(521_198, 0).saturating_mul(c.into())) + // Measured: `655 + c * (47 ±0)` + // Estimated: `4119 + c * (47 ±0)` + // Minimum execution time: 48_737_000 picoseconds. 
+ Weight::from_parts(51_476_545, 4119) + // Standard Error: 9_412 + .saturating_add(Weight::from_parts(444_940, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) } - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:0) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection Invulnerables (r:1 w:0) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionDesiredCollatorsOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 7]`. + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:0) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionDesiredCollatorsOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionDesiredCollatorsOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[2, 8]`. fn onboard(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + c * (54 ±0)` + // Measured: `360 + c * (54 ±0)` // Estimated: `3529` - // Minimum execution time: 14_337_000 picoseconds. - Weight::from_parts(14_827_525, 3529) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(298_748, 0).saturating_mul(c.into())) + // Minimum execution time: 24_530_000 picoseconds. 
+ Weight::from_parts(24_716_259, 3529) + // Standard Error: 2_330 + .saturating_add(Weight::from_parts(277_933, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn offboard(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_646_004, 1806) - // Standard Error: 479 - .saturating_add(Weight::from_parts(160_089, 0).saturating_mul(c.into())) + // Minimum execution time: 13_646_000 picoseconds. + Weight::from_parts(14_002_776, 1806) + // Standard Error: 1_225 + .saturating_add(Weight::from_parts(168_628, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (103 ±0)` - // Estimated: `3834` - // Minimum execution time: 22_821_000 picoseconds. - Weight::from_parts(23_668_202, 3834) - // Standard Error: 6_654 - .saturating_add(Weight::from_parts(844_978, 0).saturating_mul(c.into())) + // Measured: `313 + c * (103 ±0)` + // Estimated: `3694` + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_862_445, 3694) + // Standard Error: 9_538 + .saturating_add(Weight::from_parts(709_448, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn force_release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (103 ±0)` - // Estimated: `3834` - // Minimum execution time: 22_462_000 picoseconds. - Weight::from_parts(23_215_875, 3834) - // Standard Error: 6_450 - .saturating_add(Weight::from_parts(830_887, 0).saturating_mul(c.into())) + // Measured: `313 + c * (103 ±0)` + // Estimated: `3694` + // Minimum execution time: 45_887_000 picoseconds. + Weight::from_parts(47_099_490, 3694) + // Standard Error: 9_203 + .saturating_add(Weight::from_parts(708_747, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn note_author() -> Weight { // Proof Size summary in bytes: // Measured: `155` // Estimated: `6196` - // Minimum execution time: 17_624_000 picoseconds. 
- Weight::from_parts(18_025_000, 6196) + // Minimum execution time: 43_192_000 picoseconds. + Weight::from_parts(43_667_000, 6196) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:0) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:8 w:0) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: CollatorSelection Invulnerables (r:1 w:0) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:7 w:7) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:7 w:7) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:8 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:7 w:7) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:7 w:7) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 8]`. /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `725 + c * (84 ±0) + r * (254 ±0)` - // Estimated: `6196 + c * (2519 ±0) + r * (2844 ±0)` - // Minimum execution time: 11_318_000 picoseconds. 
- Weight::from_parts(11_615_000, 6196) - // Standard Error: 69_557 - .saturating_add(Weight::from_parts(13_016_275, 0).saturating_mul(c.into())) + // Measured: `725 + c * (84 ±0) + r * (239 ±0)` + // Estimated: `26857 + c * (2519 ±0) + r * (2704 ±4)` + // Minimum execution time: 18_204_000 picoseconds. + Weight::from_parts(18_486_000, 26857) + // Standard Error: 368_905 + .saturating_add(Weight::from_parts(27_883_644, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2844).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2704).saturating_mul(r.into())) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Session NextKeys (r:1 w:0) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: CollatorSelection Invulnerables (r:1 w:1) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection Candidates (r:1 w:0) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// The range of component `b` is `[1, 7]`. + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// The range of component `b` is `[2, 8]`. fn add_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + b * (45 ±0)` - // Estimated: `3873 + b * (45 ±0)` - // Minimum execution time: 10_975_000 picoseconds. - Weight::from_parts(11_362_608, 3873) - // Standard Error: 411 - .saturating_add(Weight::from_parts(152_014, 0).saturating_mul(b.into())) + // Measured: `358 + b * (45 ±0)` + // Estimated: `3829 + b * (45 ±0)` + // Minimum execution time: 18_241_000 picoseconds. + Weight::from_parts(18_396_907, 3829) + // Standard Error: 1_022 + .saturating_add(Weight::from_parts(184_843, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(b.into())) } - /// Storage: CollatorSelection Invulnerables (r:1 w:1) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// The range of component `b` is `[1, 7]`. + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 8]`. fn remove_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96 + b * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 6_369_000 picoseconds. 
- Weight::from_parts(6_604_933, 1806) - // Standard Error: 424 - .saturating_add(Weight::from_parts(145_929, 0).saturating_mul(b.into())) + // Minimum execution time: 11_945_000 picoseconds. + Weight::from_parts(12_355_271, 1806) + // Standard Error: 1_116 + .saturating_add(Weight::from_parts(121_483, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:0) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 9]`. + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::CollatorSelectionLicenseBondOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionLicenseBondOverride` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 10]`. fn get_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + c * (46 ±0)` - // Estimated: `4131 + c * (47 ±0)` - // Minimum execution time: 23_857_000 picoseconds. - Weight::from_parts(25_984_655, 4131) - // Standard Error: 4_364 - .saturating_add(Weight::from_parts(521_198, 0).saturating_mul(c.into())) + // Measured: `655 + c * (47 ±0)` + // Estimated: `4119 + c * (47 ±0)` + // Minimum execution time: 48_737_000 picoseconds. 
+ Weight::from_parts(51_476_545, 4119) + // Standard Error: 9_412 + .saturating_add(Weight::from_parts(444_940, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 47).saturating_mul(c.into())) } - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:0) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection Invulnerables (r:1 w:0) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionDesiredCollatorsOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// The range of component `c` is `[1, 7]`. + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:0) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionDesiredCollatorsOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionDesiredCollatorsOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[2, 8]`. fn onboard(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + c * (54 ±0)` + // Measured: `360 + c * (54 ±0)` // Estimated: `3529` - // Minimum execution time: 14_337_000 picoseconds. - Weight::from_parts(14_827_525, 3529) - // Standard Error: 1_210 - .saturating_add(Weight::from_parts(298_748, 0).saturating_mul(c.into())) + // Minimum execution time: 24_530_000 picoseconds. 
+ Weight::from_parts(24_716_259, 3529) + // Standard Error: 2_330 + .saturating_add(Weight::from_parts(277_933, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn offboard(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `111 + c * (32 ±0)` // Estimated: `1806` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_646_004, 1806) - // Standard Error: 479 - .saturating_add(Weight::from_parts(160_089, 0).saturating_mul(c.into())) + // Minimum execution time: 13_646_000 picoseconds. + Weight::from_parts(14_002_776, 1806) + // Standard Error: 1_225 + .saturating_add(Weight::from_parts(168_628, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (103 ±0)` - // Estimated: `3834` - // Minimum execution time: 22_821_000 picoseconds. - Weight::from_parts(23_668_202, 3834) - // Standard Error: 6_654 - .saturating_add(Weight::from_parts(844_978, 0).saturating_mul(c.into())) + // Measured: `313 + c * (103 ±0)` + // Estimated: `3694` + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_862_445, 3694) + // Standard Error: 9_538 + .saturating_add(Weight::from_parts(709_448, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:1) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:1 w:1) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:1 w:1) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `c` is `[1, 8]`. fn force_release_license(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (103 ±0)` - // Estimated: `3834` - // Minimum execution time: 22_462_000 picoseconds. - Weight::from_parts(23_215_875, 3834) - // Standard Error: 6_450 - .saturating_add(Weight::from_parts(830_887, 0).saturating_mul(c.into())) + // Measured: `313 + c * (103 ±0)` + // Estimated: `3694` + // Minimum execution time: 45_887_000 picoseconds. + Weight::from_parts(47_099_490, 3694) + // Standard Error: 9_203 + .saturating_add(Weight::from_parts(708_747, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:0 w:1) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn note_author() -> Weight { // Proof Size summary in bytes: // Measured: `155` // Estimated: `6196` - // Minimum execution time: 17_624_000 picoseconds. 
- Weight::from_parts(18_025_000, 6196) + // Minimum execution time: 43_192_000 picoseconds. + Weight::from_parts(43_667_000, 6196) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: CollatorSelection Candidates (r:1 w:0) - /// Proof: CollatorSelection Candidates (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:1 w:0) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: CollatorSelection LastAuthoredBlock (r:8 w:0) - /// Proof: CollatorSelection LastAuthoredBlock (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: CollatorSelection Invulnerables (r:1 w:0) - /// Proof: CollatorSelection Invulnerables (max_values: Some(1), max_size: Some(321), added: 816, mode: MaxEncodedLen) - /// Storage: System BlockWeight (r:1 w:1) - /// Proof: System BlockWeight (max_values: Some(1), max_size: Some(48), added: 543, mode: MaxEncodedLen) - /// Storage: CollatorSelection LicenseDepositOf (r:7 w:7) - /// Proof: CollatorSelection LicenseDepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:7 w:7) - /// Proof: Balances Holds (max_values: None, max_size: Some(369), added: 2844, mode: MaxEncodedLen) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:1 w:0) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:8 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(321), added: 816, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LicenseDepositOf` (r:7 w:7) + /// Proof: `CollatorSelection::LicenseDepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:7 w:7) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(229), added: 2704, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 8]`. /// The range of component `c` is `[1, 8]`. fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `725 + c * (84 ±0) + r * (254 ±0)` - // Estimated: `6196 + c * (2519 ±0) + r * (2844 ±0)` - // Minimum execution time: 11_318_000 picoseconds. 
- Weight::from_parts(11_615_000, 6196) - // Standard Error: 69_557 - .saturating_add(Weight::from_parts(13_016_275, 0).saturating_mul(c.into())) + // Measured: `725 + c * (84 ±0) + r * (239 ±0)` + // Estimated: `26857 + c * (2519 ±0) + r * (2704 ±4)` + // Minimum execution time: 18_204_000 picoseconds. + Weight::from_parts(18_486_000, 26857) + // Standard Error: 368_905 + .saturating_add(Weight::from_parts(27_883_644, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2844).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2704).saturating_mul(r.into())) } } diff --git a/pallets/common/src/weights.rs b/pallets/common/src/weights.rs index 57a73dfdb0..c6566ed601 100644 --- a/pallets/common/src/weights.rs +++ b/pallets/common/src/weights.rs @@ -5,7 +5,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -48,10 +48,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_560_000 picoseconds. - Weight::from_parts(28_643_440, 44457) - // Standard Error: 28_941 - .saturating_add(Weight::from_parts(18_277_422, 0).saturating_mul(b.into())) + // Minimum execution time: 7_698_000 picoseconds. + Weight::from_parts(996_959, 44457) + // Standard Error: 56_401 + .saturating_add(Weight::from_parts(36_272_294, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -61,8 +61,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_290_000 picoseconds. - Weight::from_parts(4_460_000, 3535) + // Minimum execution time: 6_235_000 picoseconds. + Weight::from_parts(6_437_000, 3535) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Common::IsAdmin` (r:1 w:0) @@ -73,8 +73,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 6_100_000 picoseconds. - Weight::from_parts(6_350_000, 20191) + // Minimum execution time: 8_907_000 picoseconds. + Weight::from_parts(9_168_000, 20191) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -88,10 +88,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_560_000 picoseconds. - Weight::from_parts(28_643_440, 44457) - // Standard Error: 28_941 - .saturating_add(Weight::from_parts(18_277_422, 0).saturating_mul(b.into())) + // Minimum execution time: 7_698_000 picoseconds. 
+ Weight::from_parts(996_959, 44457) + // Standard Error: 56_401 + .saturating_add(Weight::from_parts(36_272_294, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -101,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `373` // Estimated: `3535` - // Minimum execution time: 4_290_000 picoseconds. - Weight::from_parts(4_460_000, 3535) + // Minimum execution time: 6_235_000 picoseconds. + Weight::from_parts(6_437_000, 3535) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Common::IsAdmin` (r:1 w:0) @@ -113,8 +113,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `20191` - // Minimum execution time: 6_100_000 picoseconds. - Weight::from_parts(6_350_000, 20191) + // Minimum execution time: 8_907_000 picoseconds. + Weight::from_parts(9_168_000, 20191) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/configuration/src/weights.rs b/pallets/configuration/src/weights.rs index 99e47ed2f7..df7c213b99 100644 --- a/pallets/configuration/src/weights.rs +++ b/pallets/configuration/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_configuration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/configuration/src/weights.rs @@ -44,136 +44,136 @@ pub trait WeightInfo { /// Weights for pallet_configuration using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Configuration WeightToFeeCoefficientOverride (r:0 w:1) - /// Proof: Configuration WeightToFeeCoefficientOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Configuration::WeightToFeeCoefficientOverride` (r:0 w:1) + /// Proof: `Configuration::WeightToFeeCoefficientOverride` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_weight_to_fee_coefficient_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 990_000 picoseconds. - Weight::from_parts(1_090_000, 0) + // Minimum execution time: 2_478_000 picoseconds. 
+ Weight::from_parts(2_614_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Configuration MinGasPriceOverride (r:0 w:1) - /// Proof: Configuration MinGasPriceOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) - /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) - /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) - /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Storage: `Configuration::MinGasPriceOverride` (r:0 w:1) + /// Proof: `Configuration::MinGasPriceOverride` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) fn set_min_gas_price_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_469_000 picoseconds. - Weight::from_parts(1_565_000, 0) + // Minimum execution time: 3_934_000 picoseconds. + Weight::from_parts(4_092_000, 0) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:0 w:1) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn set_app_promotion_configuration_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_027_000 picoseconds. - Weight::from_parts(1_098_000, 0) + // Minimum execution time: 2_657_000 picoseconds. + Weight::from_parts(2_768_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionDesiredCollatorsOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionDesiredCollatorsOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionDesiredCollatorsOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_collator_selection_desired_collators() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_149_000 picoseconds. - Weight::from_parts(4_326_000, 0) + // Minimum execution time: 9_148_000 picoseconds. 
+ Weight::from_parts(9_521_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionLicenseBondOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionLicenseBondOverride` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_collator_selection_license_bond() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_758_000 picoseconds. - Weight::from_parts(2_911_000, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(6_908_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_collator_selection_kick_threshold() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_695_000 picoseconds. - Weight::from_parts(2_829_000, 0) + // Minimum execution time: 6_681_000 picoseconds. + Weight::from_parts(6_885_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Configuration WeightToFeeCoefficientOverride (r:0 w:1) - /// Proof: Configuration WeightToFeeCoefficientOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Configuration::WeightToFeeCoefficientOverride` (r:0 w:1) + /// Proof: `Configuration::WeightToFeeCoefficientOverride` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_weight_to_fee_coefficient_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 990_000 picoseconds. - Weight::from_parts(1_090_000, 0) + // Minimum execution time: 2_478_000 picoseconds. 
+ Weight::from_parts(2_614_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Configuration MinGasPriceOverride (r:0 w:1) - /// Proof: Configuration MinGasPriceOverride (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) - /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) - /// Storage: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) - /// Proof Skipped: unknown `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Storage: `Configuration::MinGasPriceOverride` (r:0 w:1) + /// Proof: `Configuration::MinGasPriceOverride` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e772609bc3a1e532c9cb85d57feed02cbff8e` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xc1fef3b7207c11a52df13c12884e77263864ade243c642793ebcfe9e16f454ca` (r:0 w:1) fn set_min_gas_price_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_469_000 picoseconds. - Weight::from_parts(1_565_000, 0) + // Minimum execution time: 3_934_000 picoseconds. + Weight::from_parts(4_092_000, 0) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Configuration AppPromomotionConfigurationOverride (r:0 w:1) - /// Proof: Configuration AppPromomotionConfigurationOverride (max_values: Some(1), max_size: Some(17), added: 512, mode: MaxEncodedLen) + /// Storage: `Configuration::AppPromomotionConfigurationOverride` (r:0 w:1) + /// Proof: `Configuration::AppPromomotionConfigurationOverride` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn set_app_promotion_configuration_override() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_027_000 picoseconds. - Weight::from_parts(1_098_000, 0) + // Minimum execution time: 2_657_000 picoseconds. + Weight::from_parts(2_768_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionDesiredCollatorsOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionDesiredCollatorsOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionDesiredCollatorsOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionDesiredCollatorsOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_collator_selection_desired_collators() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_149_000 picoseconds. - Weight::from_parts(4_326_000, 0) + // Minimum execution time: 9_148_000 picoseconds. 
+ Weight::from_parts(9_521_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionLicenseBondOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionLicenseBondOverride (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionLicenseBondOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionLicenseBondOverride` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn set_collator_selection_license_bond() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_758_000 picoseconds. - Weight::from_parts(2_911_000, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(6_908_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Configuration CollatorSelectionKickThresholdOverride (r:0 w:1) - /// Proof: Configuration CollatorSelectionKickThresholdOverride (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Configuration::CollatorSelectionKickThresholdOverride` (r:0 w:1) + /// Proof: `Configuration::CollatorSelectionKickThresholdOverride` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_collator_selection_kick_threshold() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_695_000 picoseconds. - Weight::from_parts(2_829_000, 0) + // Minimum execution time: 6_681_000 picoseconds. + Weight::from_parts(6_885_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/evm-migration/src/weights.rs b/pallets/evm-migration/src/weights.rs index 66f307fb43..fb4cd1a3f1 100644 --- a/pallets/evm-migration/src/weights.rs +++ b/pallets/evm-migration/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_evm_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/evm-migration/src/weights.rs @@ -43,50 +43,50 @@ pub trait WeightInfo { /// Weights for pallet_evm_migration using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: EvmMigration MigrationPending (r:1 w:1) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: EVM AccountCodes (r:1 w:0) - /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:1) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountCodes` (r:1 w:0) + /// Proof: `EVM::AccountCodes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn begin() -> Weight { // Proof Size summary in bytes: // Measured: `94` // Estimated: `3593` - // Minimum execution time: 6_131_000 picoseconds. - Weight::from_parts(6_351_000, 3593) + // Minimum execution time: 12_866_000 picoseconds. + Weight::from_parts(13_129_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: EvmMigration MigrationPending (r:1 w:0) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: EVM AccountStorages (r:0 w:80) - /// Proof Skipped: EVM AccountStorages (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:0) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountStorages` (r:0 w:80) + /// Proof: `EVM::AccountStorages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[0, 80]`. fn set_data(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 4_522_000 picoseconds. - Weight::from_parts(4_569_839, 3494) - // Standard Error: 253 - .saturating_add(Weight::from_parts(743_780, 0).saturating_mul(b.into())) + // Minimum execution time: 6_855_000 picoseconds. + Weight::from_parts(7_515_980, 3494) + // Standard Error: 564 + .saturating_add(Weight::from_parts(1_535_437, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(b.into()))) } - /// Storage: EvmMigration MigrationPending (r:1 w:1) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: EVM AccountCodes (r:0 w:1) - /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:1) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountCodes` (r:0 w:1) + /// Proof: `EVM::AccountCodes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[0, 80]`. fn finish(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 5_329_000 picoseconds. 
- Weight::from_parts(5_677_312, 3494) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_369, 0).saturating_mul(b.into())) + // Minimum execution time: 9_414_000 picoseconds. + Weight::from_parts(9_880_884, 3494) + // Standard Error: 63 + .saturating_add(Weight::from_parts(3_067, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -95,69 +95,69 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 890_000 picoseconds. - Weight::from_parts(1_279_871, 0) - // Standard Error: 112 - .saturating_add(Weight::from_parts(408_968, 0).saturating_mul(b.into())) + // Minimum execution time: 1_983_000 picoseconds. + Weight::from_parts(4_354_523, 0) + // Standard Error: 312 + .saturating_add(Weight::from_parts(1_329_536, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 896_000 picoseconds. - Weight::from_parts(1_975_680, 0) - // Standard Error: 117 - .saturating_add(Weight::from_parts(1_003_721, 0).saturating_mul(b.into())) + // Minimum execution time: 2_172_000 picoseconds. + Weight::from_parts(4_622_224, 0) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_311_216, 0).saturating_mul(b.into())) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: EvmMigration MigrationPending (r:1 w:1) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: EVM AccountCodes (r:1 w:0) - /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:1) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountCodes` (r:1 w:0) + /// Proof: `EVM::AccountCodes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn begin() -> Weight { // Proof Size summary in bytes: // Measured: `94` // Estimated: `3593` - // Minimum execution time: 6_131_000 picoseconds. - Weight::from_parts(6_351_000, 3593) + // Minimum execution time: 12_866_000 picoseconds. + Weight::from_parts(13_129_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: EvmMigration MigrationPending (r:1 w:0) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: EVM AccountStorages (r:0 w:80) - /// Proof Skipped: EVM AccountStorages (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:0) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountStorages` (r:0 w:80) + /// Proof: `EVM::AccountStorages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[0, 80]`. 
fn set_data(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 4_522_000 picoseconds. - Weight::from_parts(4_569_839, 3494) - // Standard Error: 253 - .saturating_add(Weight::from_parts(743_780, 0).saturating_mul(b.into())) + // Minimum execution time: 6_855_000 picoseconds. + Weight::from_parts(7_515_980, 3494) + // Standard Error: 564 + .saturating_add(Weight::from_parts(1_535_437, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(b.into()))) } - /// Storage: EvmMigration MigrationPending (r:1 w:1) - /// Proof: EvmMigration MigrationPending (max_values: None, max_size: Some(29), added: 2504, mode: MaxEncodedLen) - /// Storage: EVM AccountCodes (r:0 w:1) - /// Proof Skipped: EVM AccountCodes (max_values: None, max_size: None, mode: Measured) + /// Storage: `EvmMigration::MigrationPending` (r:1 w:1) + /// Proof: `EvmMigration::MigrationPending` (`max_values`: None, `max_size`: Some(29), added: 2504, mode: `MaxEncodedLen`) + /// Storage: `EVM::AccountCodes` (r:0 w:1) + /// Proof: `EVM::AccountCodes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[0, 80]`. fn finish(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `96` // Estimated: `3494` - // Minimum execution time: 5_329_000 picoseconds. - Weight::from_parts(5_677_312, 3494) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_369, 0).saturating_mul(b.into())) + // Minimum execution time: 9_414_000 picoseconds. + Weight::from_parts(9_880_884, 3494) + // Standard Error: 63 + .saturating_add(Weight::from_parts(3_067, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -166,20 +166,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 890_000 picoseconds. - Weight::from_parts(1_279_871, 0) - // Standard Error: 112 - .saturating_add(Weight::from_parts(408_968, 0).saturating_mul(b.into())) + // Minimum execution time: 1_983_000 picoseconds. + Weight::from_parts(4_354_523, 0) + // Standard Error: 312 + .saturating_add(Weight::from_parts(1_329_536, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 200]`. fn insert_events(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 896_000 picoseconds. - Weight::from_parts(1_975_680, 0) - // Standard Error: 117 - .saturating_add(Weight::from_parts(1_003_721, 0).saturating_mul(b.into())) + // Minimum execution time: 2_172_000 picoseconds. + Weight::from_parts(4_622_224, 0) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_311_216, 0).saturating_mul(b.into())) } } diff --git a/pallets/foreign-assets/src/weights.rs b/pallets/foreign-assets/src/weights.rs index faaeba0177..c6fa4b15cf 100644 --- a/pallets/foreign-assets/src/weights.rs +++ b/pallets/foreign-assets/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_foreign_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/foreign-assets/src/weights.rs @@ -40,49 +40,49 @@ pub trait WeightInfo { /// Weights for pallet_foreign_assets using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Common CreatedCollectionCount (r:1 w:1) - /// Proof: Common CreatedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:0) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ForeignAssets NextForeignAssetId (r:1 w:1) - /// Proof: ForeignAssets NextForeignAssetId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ForeignAssets LocationToCurrencyIds (r:1 w:1) - /// Proof: ForeignAssets LocationToCurrencyIds (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) - /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetMetadatas (r:1 w:1) - /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetBinding (r:1 w:1) - /// Proof: ForeignAssets AssetBinding (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:0 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:0 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CreatedCollectionCount` (r:1 w:1) + /// Proof: `Common::CreatedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:0) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::NextForeignAssetId` (r:1 w:1) + /// Proof: `ForeignAssets::NextForeignAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::LocationToCurrencyIds` (r:1 w:1) + /// Proof: `ForeignAssets::LocationToCurrencyIds` 
(`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::ForeignAssetLocations` (r:1 w:1) + /// Proof: `ForeignAssets::ForeignAssetLocations` (`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetMetadatas` (r:1 w:1) + /// Proof: `ForeignAssets::AssetMetadatas` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetBinding` (r:1 w:1) + /// Proof: `ForeignAssets::AssetBinding` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:0 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:0 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn register_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `6196` - // Minimum execution time: 33_294_000 picoseconds. - Weight::from_parts(34_011_000, 6196) + // Minimum execution time: 73_798_000 picoseconds. + Weight::from_parts(74_677_000, 6196) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) } - /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) - /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetMetadatas (r:1 w:1) - /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `ForeignAssets::ForeignAssetLocations` (r:1 w:1) + /// Proof: `ForeignAssets::ForeignAssetLocations` (`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetMetadatas` (r:1 w:1) + /// Proof: `ForeignAssets::AssetMetadatas` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn update_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `4079` - // Minimum execution time: 9_296_000 picoseconds. - Weight::from_parts(9_594_000, 4079) + // Minimum execution time: 17_398_000 picoseconds. 
+ Weight::from_parts(17_697_000, 4079) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -90,49 +90,49 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Common CreatedCollectionCount (r:1 w:1) - /// Proof: Common CreatedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:0) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ForeignAssets NextForeignAssetId (r:1 w:1) - /// Proof: ForeignAssets NextForeignAssetId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ForeignAssets LocationToCurrencyIds (r:1 w:1) - /// Proof: ForeignAssets LocationToCurrencyIds (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) - /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetMetadatas (r:1 w:1) - /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetBinding (r:1 w:1) - /// Proof: ForeignAssets AssetBinding (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:0 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:0 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CreatedCollectionCount` (r:1 w:1) + /// Proof: `Common::CreatedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:0) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::NextForeignAssetId` (r:1 w:1) + /// Proof: `ForeignAssets::NextForeignAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::LocationToCurrencyIds` (r:1 w:1) + /// Proof: `ForeignAssets::LocationToCurrencyIds` (`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::ForeignAssetLocations` (r:1 w:1) + /// Proof: `ForeignAssets::ForeignAssetLocations` (`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetMetadatas` (r:1 w:1) + /// Proof: `ForeignAssets::AssetMetadatas` (`max_values`: None, `max_size`: Some(71), 
added: 2546, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetBinding` (r:1 w:1) + /// Proof: `ForeignAssets::AssetBinding` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:0 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:0 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn register_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `6196` - // Minimum execution time: 33_294_000 picoseconds. - Weight::from_parts(34_011_000, 6196) + // Minimum execution time: 73_798_000 picoseconds. + Weight::from_parts(74_677_000, 6196) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) } - /// Storage: ForeignAssets ForeignAssetLocations (r:1 w:1) - /// Proof: ForeignAssets ForeignAssetLocations (max_values: None, max_size: Some(614), added: 3089, mode: MaxEncodedLen) - /// Storage: ForeignAssets AssetMetadatas (r:1 w:1) - /// Proof: ForeignAssets AssetMetadatas (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `ForeignAssets::ForeignAssetLocations` (r:1 w:1) + /// Proof: `ForeignAssets::ForeignAssetLocations` (`max_values`: None, `max_size`: Some(614), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::AssetMetadatas` (r:1 w:1) + /// Proof: `ForeignAssets::AssetMetadatas` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn update_foreign_asset() -> Weight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `4079` - // Minimum execution time: 9_296_000 picoseconds. - Weight::from_parts(9_594_000, 4079) + // Minimum execution time: 17_398_000 picoseconds. + Weight::from_parts(17_697_000, 4079) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/fungible/src/weights.rs b/pallets/fungible/src/weights.rs index f3522c4101..c65ab962cb 100644 --- a/pallets/fungible/src/weights.rs +++ b/pallets/fungible/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_fungible //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/fungible/src/weights.rs @@ -47,120 +47,120 @@ pub trait WeightInfo { /// Weights for pallet_fungible using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3542` - // Minimum execution time: 7_228_000 picoseconds. - Weight::from_parts(7_472_000, 3542) + // Minimum execution time: 16_400_000 picoseconds. + Weight::from_parts(16_766_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:200 w:200) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:200 w:200) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493 + b * (2552 ±0)` - // Minimum execution time: 2_398_000 picoseconds. - Weight::from_parts(4_432_908, 3493) - // Standard Error: 263 - .saturating_add(Weight::from_parts(2_617_422, 0).saturating_mul(b.into())) + // Minimum execution time: 4_433_000 picoseconds. 
+ Weight::from_parts(8_081_231, 3493) + // Standard Error: 1_938 + .saturating_add(Weight::from_parts(8_035_527, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(b.into())) } - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `3542` - // Minimum execution time: 9_444_000 picoseconds. - Weight::from_parts(9_742_000, 3542) + // Minimum execution time: 18_076_000 picoseconds. + Weight::from_parts(18_369_000, 3542) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Fungible Balance (r:2 w:2) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:2 w:2) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 9_553_000 picoseconds. - Weight::from_parts(9_852_000, 6094) + // Minimum execution time: 20_068_000 picoseconds. + Weight::from_parts(20_417_000, 6094) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Fungible Balance (r:1 w:0) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:1 w:0) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 8_435_000 picoseconds. - Weight::from_parts(8_714_000, 3542) + // Minimum execution time: 16_407_000 picoseconds. 
+ Weight::from_parts(16_695_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Fungible Balance (r:1 w:0) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:1 w:0) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 8_475_000 picoseconds. - Weight::from_parts(8_735_000, 3542) + // Minimum execution time: 16_794_000 picoseconds. + Weight::from_parts(17_217_000, 3542) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Fungible Allowance (r:1 w:0) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:1 w:0) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn check_allowed_raw() -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 4_426_000 picoseconds. - Weight::from_parts(4_604_000, 3558) + // Minimum execution time: 6_608_000 picoseconds. + Weight::from_parts(6_770_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn set_allowance_unchecked_raw() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_130_000 picoseconds. - Weight::from_parts(4_275_000, 0) + // Minimum execution time: 10_426_000 picoseconds. + Weight::from_parts(10_666_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Fungible Allowance (r:1 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:1 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3558` - // Minimum execution time: 14_878_000 picoseconds. 
- Weight::from_parts(15_263_000, 3558) + // Minimum execution time: 30_352_000 picoseconds. + Weight::from_parts(30_909_000, 3558) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -168,120 +168,120 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn create_item() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3542` - // Minimum execution time: 7_228_000 picoseconds. - Weight::from_parts(7_472_000, 3542) + // Minimum execution time: 16_400_000 picoseconds. + Weight::from_parts(16_766_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:200 w:200) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:200 w:200) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 200]`. fn create_multiple_items_ex(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493 + b * (2552 ±0)` - // Minimum execution time: 2_398_000 picoseconds. - Weight::from_parts(4_432_908, 3493) - // Standard Error: 263 - .saturating_add(Weight::from_parts(2_617_422, 0).saturating_mul(b.into())) + // Minimum execution time: 4_433_000 picoseconds. 
+ Weight::from_parts(8_081_231, 3493) + // Standard Error: 1_938 + .saturating_add(Weight::from_parts(8_035_527, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(b.into())) } - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn burn_item() -> Weight { // Proof Size summary in bytes: // Measured: `197` // Estimated: `3542` - // Minimum execution time: 9_444_000 picoseconds. - Weight::from_parts(9_742_000, 3542) + // Minimum execution time: 18_076_000 picoseconds. + Weight::from_parts(18_369_000, 3542) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Fungible Balance (r:2 w:2) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:2 w:2) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn transfer_raw() -> Weight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `6094` - // Minimum execution time: 9_553_000 picoseconds. - Weight::from_parts(9_852_000, 6094) + // Minimum execution time: 20_068_000 picoseconds. + Weight::from_parts(20_417_000, 6094) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Fungible Balance (r:1 w:0) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:1 w:0) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `182` // Estimated: `3542` - // Minimum execution time: 8_435_000 picoseconds. - Weight::from_parts(8_714_000, 3542) + // Minimum execution time: 16_407_000 picoseconds. 
+ Weight::from_parts(16_695_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Fungible Balance (r:1 w:0) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Balance` (r:1 w:0) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn approve_from() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3542` - // Minimum execution time: 8_475_000 picoseconds. - Weight::from_parts(8_735_000, 3542) + // Minimum execution time: 16_794_000 picoseconds. + Weight::from_parts(17_217_000, 3542) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Fungible Allowance (r:1 w:0) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:1 w:0) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn check_allowed_raw() -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3558` - // Minimum execution time: 4_426_000 picoseconds. - Weight::from_parts(4_604_000, 3558) + // Minimum execution time: 6_608_000 picoseconds. + Weight::from_parts(6_770_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Fungible Allowance (r:0 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:0 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) fn set_allowance_unchecked_raw() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_130_000 picoseconds. - Weight::from_parts(4_275_000, 0) + // Minimum execution time: 10_426_000 picoseconds. + Weight::from_parts(10_666_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Fungible Allowance (r:1 w:1) - /// Proof: Fungible Allowance (max_values: None, max_size: Some(93), added: 2568, mode: MaxEncodedLen) - /// Storage: Fungible TotalSupply (r:1 w:1) - /// Proof: Fungible TotalSupply (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Fungible Balance (r:1 w:1) - /// Proof: Fungible Balance (max_values: None, max_size: Some(77), added: 2552, mode: MaxEncodedLen) + /// Storage: `Fungible::Allowance` (r:1 w:1) + /// Proof: `Fungible::Allowance` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Fungible::TotalSupply` (r:1 w:1) + /// Proof: `Fungible::TotalSupply` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Fungible::Balance` (r:1 w:1) + /// Proof: `Fungible::Balance` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) fn burn_from() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3558` - // Minimum execution time: 14_878_000 picoseconds. 
- Weight::from_parts(15_263_000, 3558) + // Minimum execution time: 30_352_000 picoseconds. + Weight::from_parts(30_909_000, 3558) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/pallets/identity/src/weights.rs b/pallets/identity/src/weights.rs index 753b6c8e35..f08458481d 100644 --- a/pallets/identity/src/weights.rs +++ b/pallets/identity/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/identity/src/weights.rs @@ -34,7 +34,7 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_identity. pub trait WeightInfo { fn add_registrar(r: u32, ) -> Weight; - fn set_identity(r: u32, x: u32, ) -> Weight; + fn set_identity(x: u32, r: u32, ) -> Weight; fn set_subs_new(s: u32, ) -> Weight; fn set_subs_old(p: u32, ) -> Weight; fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight; @@ -57,83 +57,83 @@ pub trait WeightInfo { /// Weights for pallet_identity using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `31 + r * (57 ±0)` + // Measured: `0 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_759_000 picoseconds. - Weight::from_parts(7_254_560, 2626) - // Standard Error: 231 - .saturating_add(Weight::from_parts(64_513, 0).saturating_mul(r.into())) + // Minimum execution time: 11_751_000 picoseconds. + Weight::from_parts(12_271_559, 2626) + // Standard Error: 696 + .saturating_add(Weight::from_parts(85_076, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 20]`. + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. - fn set_identity(r: u32, x: u32, ) -> Weight { + /// The range of component `r` is `[1, 20]`. 
+ fn set_identity(x: u32, r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 14_134_000 picoseconds. - Weight::from_parts(12_591_985, 11003) - // Standard Error: 562 - .saturating_add(Weight::from_parts(77_682, 0).saturating_mul(r.into())) - // Standard Error: 109 - .saturating_add(Weight::from_parts(96_303, 0).saturating_mul(x.into())) + // Minimum execution time: 30_152_000 picoseconds. + Weight::from_parts(28_934_480, 11003) + // Standard Error: 131 + .saturating_add(Weight::from_parts(407_556, 0).saturating_mul(x.into())) + // Standard Error: 672 + .saturating_add(Weight::from_parts(82_838, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `100` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 4_763_000 picoseconds. - Weight::from_parts(11_344_974, 11003) - // Standard Error: 401 - .saturating_add(Weight::from_parts(1_141_028, 0).saturating_mul(s.into())) + // Minimum execution time: 8_680_000 picoseconds. 
+ Weight::from_parts(23_228_498, 11003) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(2_512_779, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 4_783_000 picoseconds. - Weight::from_parts(11_531_027, 11003) - // Standard Error: 369 - .saturating_add(Weight::from_parts(542_102, 0).saturating_mul(p.into())) + // Minimum execution time: 8_493_000 picoseconds. + Weight::from_parts(23_032_063, 11003) + // Standard Error: 1_672 + .saturating_add(Weight::from_parts(1_039_127, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. @@ -141,123 +141,123 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 23_175_000 picoseconds. 
- Weight::from_parts(16_503_215, 11003) - // Standard Error: 625 - .saturating_add(Weight::from_parts(1_175, 0).saturating_mul(r.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(533_184, 0).saturating_mul(s.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(94_600, 0).saturating_mul(x.into())) + // Minimum execution time: 50_797_000 picoseconds. + Weight::from_parts(31_070_475, 11003) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(35_164, 0).saturating_mul(r.into())) + // Standard Error: 295 + .saturating_add(Weight::from_parts(1_030_210, 0).saturating_mul(s.into())) + // Standard Error: 295 + .saturating_add(Weight::from_parts(219_795, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 15_322_000 picoseconds. - Weight::from_parts(13_671_670, 11003) - // Standard Error: 722 - .saturating_add(Weight::from_parts(73_665, 0).saturating_mul(r.into())) - // Standard Error: 140 - .saturating_add(Weight::from_parts(124_598, 0).saturating_mul(x.into())) + // Minimum execution time: 30_925_000 picoseconds. + Weight::from_parts(29_686_659, 11003) + // Standard Error: 887 + .saturating_add(Weight::from_parts(92_175, 0).saturating_mul(r.into())) + // Standard Error: 173 + .saturating_add(Weight::from_parts(421_779, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 13_268_000 picoseconds. - Weight::from_parts(12_489_352, 11003) - // Standard Error: 544 - .saturating_add(Weight::from_parts(35_424, 0).saturating_mul(r.into())) - // Standard Error: 106 - .saturating_add(Weight::from_parts(123_149, 0).saturating_mul(x.into())) + // Minimum execution time: 27_705_000 picoseconds. 
+ Weight::from_parts(27_264_552, 11003) + // Standard Error: 791 + .saturating_add(Weight::from_parts(63_003, 0).saturating_mul(r.into())) + // Standard Error: 154 + .saturating_add(Weight::from_parts(417_161, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_845_000 picoseconds. - Weight::from_parts(5_147_478, 2626) - // Standard Error: 169 - .saturating_add(Weight::from_parts(55_561, 0).saturating_mul(r.into())) + // Minimum execution time: 7_587_000 picoseconds. + Weight::from_parts(7_938_817, 2626) + // Standard Error: 494 + .saturating_add(Weight::from_parts(67_900, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_191_000 picoseconds. - Weight::from_parts(4_478_351, 2626) - // Standard Error: 138 - .saturating_add(Weight::from_parts(53_627, 0).saturating_mul(r.into())) + // Minimum execution time: 6_558_000 picoseconds. + Weight::from_parts(6_973_093, 2626) + // Standard Error: 398 + .saturating_add(Weight::from_parts(63_901, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_003_000 picoseconds. - Weight::from_parts(4_303_365, 2626) - // Standard Error: 147 - .saturating_add(Weight::from_parts(52_472, 0).saturating_mul(r.into())) + // Minimum execution time: 6_666_000 picoseconds. 
+ Weight::from_parts(7_050_744, 2626) + // Standard Error: 409 + .saturating_add(Weight::from_parts(61_617, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. /// The range of component `x` is `[0, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + r * (57 ±0) + x * (66 ±0)` + // Measured: `387 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 11_465_000 picoseconds. - Weight::from_parts(10_326_049, 11003) - // Standard Error: 660 - .saturating_add(Weight::from_parts(48_922, 0).saturating_mul(r.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(185_374, 0).saturating_mul(x.into())) + // Minimum execution time: 21_312_000 picoseconds. + Weight::from_parts(21_058_525, 11003) + // Standard Error: 905 + .saturating_add(Weight::from_parts(65_756, 0).saturating_mul(r.into())) + // Standard Error: 167 + .saturating_add(Weight::from_parts(655_603, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. @@ -265,142 +265,142 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 34_933_000 picoseconds. 
- Weight::from_parts(28_994_022, 11003) - // Standard Error: 668 - .saturating_add(Weight::from_parts(21_722, 0).saturating_mul(r.into())) - // Standard Error: 130 - .saturating_add(Weight::from_parts(540_580, 0).saturating_mul(s.into())) - // Standard Error: 130 - .saturating_add(Weight::from_parts(89_348, 0).saturating_mul(x.into())) + // Minimum execution time: 75_631_000 picoseconds. + Weight::from_parts(55_794_077, 11003) + // Standard Error: 1_902 + .saturating_add(Weight::from_parts(64_363, 0).saturating_mul(r.into())) + // Standard Error: 371 + .saturating_add(Weight::from_parts(1_042_200, 0).saturating_mul(s.into())) + // Standard Error: 371 + .saturating_add(Weight::from_parts(221_969, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:0 w:600) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:0 w:600) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_insert_identities(x: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_770_000 picoseconds. - Weight::from_parts(2_875_000, 0) - // Standard Error: 281_295 - .saturating_add(Weight::from_parts(37_513_186, 0).saturating_mul(x.into())) - // Standard Error: 46_799 - .saturating_add(Weight::from_parts(7_949_936, 0).saturating_mul(n.into())) + // Minimum execution time: 6_400_000 picoseconds. + Weight::from_parts(6_550_000, 0) + // Standard Error: 2_053_703 + .saturating_add(Weight::from_parts(125_508_622, 0).saturating_mul(x.into())) + // Standard Error: 341_677 + .saturating_add(Weight::from_parts(25_567_941, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Identity SubsOf (r:600 w:0) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:0 w:600) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:600 w:0) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:0 w:600) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_remove_identities(x: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 2_751_000 picoseconds. - Weight::from_parts(2_862_000, 990) - // Standard Error: 953 - .saturating_add(Weight::from_parts(28_947, 0).saturating_mul(x.into())) - // Standard Error: 158 - .saturating_add(Weight::from_parts(994_085, 0).saturating_mul(n.into())) + // Minimum execution time: 6_143_000 picoseconds. 
+ Weight::from_parts(6_298_000, 990) + // Standard Error: 3_385 + .saturating_add(Weight::from_parts(50_803, 0).saturating_mul(x.into())) + // Standard Error: 563 + .saturating_add(Weight::from_parts(2_108_225, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) } - /// Storage: Identity SubsOf (r:600 w:600) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:600 w:600) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_set_subs(s: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 2_671_000 picoseconds. - Weight::from_parts(2_814_000, 990) - // Standard Error: 785_159 - .saturating_add(Weight::from_parts(109_659_566, 0).saturating_mul(s.into())) - // Standard Error: 130_628 - .saturating_add(Weight::from_parts(19_169_269, 0).saturating_mul(n.into())) + // Minimum execution time: 6_250_000 picoseconds. + Weight::from_parts(6_366_000, 990) + // Standard Error: 4_608_791 + .saturating_add(Weight::from_parts(290_267_031, 0).saturating_mul(s.into())) + // Standard Error: 766_770 + .saturating_add(Weight::from_parts(50_932_487, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// The range of component `s` is `[0, 99]`. + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `474 + s * (36 ±0)` + // Measured: `437 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 12_571_000 picoseconds. 
- Weight::from_parts(16_366_301, 11003) - // Standard Error: 217 - .saturating_add(Weight::from_parts(42_542, 0).saturating_mul(s.into())) + // Minimum execution time: 26_954_000 picoseconds. + Weight::from_parts(31_240_173, 11003) + // Standard Error: 547 + .saturating_add(Weight::from_parts(52_287, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 7_278_000 picoseconds. - Weight::from_parts(9_227_799, 11003) - // Standard Error: 104 - .saturating_add(Weight::from_parts(14_014, 0).saturating_mul(s.into())) + // Minimum execution time: 11_715_000 picoseconds. + Weight::from_parts(13_599_708, 11003) + // Standard Error: 236 + .saturating_add(Weight::from_parts(16_093, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 15_771_000 picoseconds. - Weight::from_parts(18_105_475, 11003) - // Standard Error: 129 - .saturating_add(Weight::from_parts(32_074, 0).saturating_mul(s.into())) + // Minimum execution time: 30_270_000 picoseconds. 
+ Weight::from_parts(32_928_187, 11003) + // Standard Error: 361 + .saturating_add(Weight::from_parts(39_163, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `s` is `[0, 99]`. + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `703 + s * (37 ±0)` + // Measured: `665 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 14_093_000 picoseconds. - Weight::from_parts(16_125_177, 6723) - // Standard Error: 146 - .saturating_add(Weight::from_parts(39_270, 0).saturating_mul(s.into())) + // Minimum execution time: 23_796_000 picoseconds. + Weight::from_parts(26_106_458, 6723) + // Standard Error: 384 + .saturating_add(Weight::from_parts(43_963, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -408,83 +408,83 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `31 + r * (57 ±0)` + // Measured: `0 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_759_000 picoseconds. - Weight::from_parts(7_254_560, 2626) - // Standard Error: 231 - .saturating_add(Weight::from_parts(64_513, 0).saturating_mul(r.into())) + // Minimum execution time: 11_751_000 picoseconds. + Weight::from_parts(12_271_559, 2626) + // Standard Error: 696 + .saturating_add(Weight::from_parts(85_076, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 20]`. + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. 
- fn set_identity(r: u32, x: u32, ) -> Weight { + /// The range of component `r` is `[1, 20]`. + fn set_identity(x: u32, r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `441 + r * (5 ±0)` // Estimated: `11003` - // Minimum execution time: 14_134_000 picoseconds. - Weight::from_parts(12_591_985, 11003) - // Standard Error: 562 - .saturating_add(Weight::from_parts(77_682, 0).saturating_mul(r.into())) - // Standard Error: 109 - .saturating_add(Weight::from_parts(96_303, 0).saturating_mul(x.into())) + // Minimum execution time: 30_152_000 picoseconds. + Weight::from_parts(28_934_480, 11003) + // Standard Error: 131 + .saturating_add(Weight::from_parts(407_556, 0).saturating_mul(x.into())) + // Standard Error: 672 + .saturating_add(Weight::from_parts(82_838, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `100` // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 4_763_000 picoseconds. - Weight::from_parts(11_344_974, 11003) - // Standard Error: 401 - .saturating_add(Weight::from_parts(1_141_028, 0).saturating_mul(s.into())) + // Minimum execution time: 8_680_000 picoseconds. 
+ Weight::from_parts(23_228_498, 11003) + // Standard Error: 1_817 + .saturating_add(Weight::from_parts(2_512_779, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `193 + p * (32 ±0)` // Estimated: `11003` - // Minimum execution time: 4_783_000 picoseconds. - Weight::from_parts(11_531_027, 11003) - // Standard Error: 369 - .saturating_add(Weight::from_parts(542_102, 0).saturating_mul(p.into())) + // Minimum execution time: 8_493_000 picoseconds. + Weight::from_parts(23_032_063, 11003) + // Standard Error: 1_672 + .saturating_add(Weight::from_parts(1_039_127, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. @@ -492,123 +492,123 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `468 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 23_175_000 picoseconds. 
- Weight::from_parts(16_503_215, 11003) - // Standard Error: 625 - .saturating_add(Weight::from_parts(1_175, 0).saturating_mul(r.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(533_184, 0).saturating_mul(s.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(94_600, 0).saturating_mul(x.into())) + // Minimum execution time: 50_797_000 picoseconds. + Weight::from_parts(31_070_475, 11003) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(35_164, 0).saturating_mul(r.into())) + // Standard Error: 295 + .saturating_add(Weight::from_parts(1_030_210, 0).saturating_mul(s.into())) + // Standard Error: 295 + .saturating_add(Weight::from_parts(219_795, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `366 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 15_322_000 picoseconds. - Weight::from_parts(13_671_670, 11003) - // Standard Error: 722 - .saturating_add(Weight::from_parts(73_665, 0).saturating_mul(r.into())) - // Standard Error: 140 - .saturating_add(Weight::from_parts(124_598, 0).saturating_mul(x.into())) + // Minimum execution time: 30_925_000 picoseconds. + Weight::from_parts(29_686_659, 11003) + // Standard Error: 887 + .saturating_add(Weight::from_parts(92_175, 0).saturating_mul(r.into())) + // Standard Error: 173 + .saturating_add(Weight::from_parts(421_779, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[0, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `397 + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 13_268_000 picoseconds. - Weight::from_parts(12_489_352, 11003) - // Standard Error: 544 - .saturating_add(Weight::from_parts(35_424, 0).saturating_mul(r.into())) - // Standard Error: 106 - .saturating_add(Weight::from_parts(123_149, 0).saturating_mul(x.into())) + // Minimum execution time: 27_705_000 picoseconds. 
+ Weight::from_parts(27_264_552, 11003) + // Standard Error: 791 + .saturating_add(Weight::from_parts(63_003, 0).saturating_mul(r.into())) + // Standard Error: 154 + .saturating_add(Weight::from_parts(417_161, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_845_000 picoseconds. - Weight::from_parts(5_147_478, 2626) - // Standard Error: 169 - .saturating_add(Weight::from_parts(55_561, 0).saturating_mul(r.into())) + // Minimum execution time: 7_587_000 picoseconds. + Weight::from_parts(7_938_817, 2626) + // Standard Error: 494 + .saturating_add(Weight::from_parts(67_900, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_191_000 picoseconds. - Weight::from_parts(4_478_351, 2626) - // Standard Error: 138 - .saturating_add(Weight::from_parts(53_627, 0).saturating_mul(r.into())) + // Minimum execution time: 6_558_000 picoseconds. + Weight::from_parts(6_973_093, 2626) + // Standard Error: 398 + .saturating_add(Weight::from_parts(63_901, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `88 + r * (57 ±0)` + // Measured: `31 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 4_003_000 picoseconds. - Weight::from_parts(4_303_365, 2626) - // Standard Error: 147 - .saturating_add(Weight::from_parts(52_472, 0).saturating_mul(r.into())) + // Minimum execution time: 6_666_000 picoseconds. 
+ Weight::from_parts(7_050_744, 2626) + // Standard Error: 409 + .saturating_add(Weight::from_parts(61_617, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 19]`. + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[2, 20]`. /// The range of component `x` is `[0, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `444 + r * (57 ±0) + x * (66 ±0)` + // Measured: `387 + r * (57 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 11_465_000 picoseconds. - Weight::from_parts(10_326_049, 11003) - // Standard Error: 660 - .saturating_add(Weight::from_parts(48_922, 0).saturating_mul(r.into())) - // Standard Error: 122 - .saturating_add(Weight::from_parts(185_374, 0).saturating_mul(x.into())) + // Minimum execution time: 21_312_000 picoseconds. + Weight::from_parts(21_058_525, 11003) + // Standard Error: 905 + .saturating_add(Weight::from_parts(65_756, 0).saturating_mul(r.into())) + // Standard Error: 167 + .saturating_add(Weight::from_parts(655_603, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. /// The range of component `x` is `[0, 100]`. @@ -616,142 +616,142 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `665 + r * (12 ±0) + s * (32 ±0) + x * (66 ±0)` // Estimated: `11003` - // Minimum execution time: 34_933_000 picoseconds. 
- Weight::from_parts(28_994_022, 11003) - // Standard Error: 668 - .saturating_add(Weight::from_parts(21_722, 0).saturating_mul(r.into())) - // Standard Error: 130 - .saturating_add(Weight::from_parts(540_580, 0).saturating_mul(s.into())) - // Standard Error: 130 - .saturating_add(Weight::from_parts(89_348, 0).saturating_mul(x.into())) + // Minimum execution time: 75_631_000 picoseconds. + Weight::from_parts(55_794_077, 11003) + // Standard Error: 1_902 + .saturating_add(Weight::from_parts(64_363, 0).saturating_mul(r.into())) + // Standard Error: 371 + .saturating_add(Weight::from_parts(1_042_200, 0).saturating_mul(s.into())) + // Standard Error: 371 + .saturating_add(Weight::from_parts(221_969, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:0 w:600) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:0 w:600) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_insert_identities(x: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_770_000 picoseconds. - Weight::from_parts(2_875_000, 0) - // Standard Error: 281_295 - .saturating_add(Weight::from_parts(37_513_186, 0).saturating_mul(x.into())) - // Standard Error: 46_799 - .saturating_add(Weight::from_parts(7_949_936, 0).saturating_mul(n.into())) + // Minimum execution time: 6_400_000 picoseconds. + Weight::from_parts(6_550_000, 0) + // Standard Error: 2_053_703 + .saturating_add(Weight::from_parts(125_508_622, 0).saturating_mul(x.into())) + // Standard Error: 341_677 + .saturating_add(Weight::from_parts(25_567_941, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Identity SubsOf (r:600 w:0) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:0 w:600) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:600 w:0) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:0 w:600) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `x` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_remove_identities(x: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 2_751_000 picoseconds. - Weight::from_parts(2_862_000, 990) - // Standard Error: 953 - .saturating_add(Weight::from_parts(28_947, 0).saturating_mul(x.into())) - // Standard Error: 158 - .saturating_add(Weight::from_parts(994_085, 0).saturating_mul(n.into())) + // Minimum execution time: 6_143_000 picoseconds. 
+ Weight::from_parts(6_298_000, 990) + // Standard Error: 3_385 + .saturating_add(Weight::from_parts(50_803, 0).saturating_mul(x.into())) + // Standard Error: 563 + .saturating_add(Weight::from_parts(2_108_225, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) } - /// Storage: Identity SubsOf (r:600 w:600) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:600 w:600) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. /// The range of component `n` is `[0, 600]`. fn force_set_subs(s: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `41` // Estimated: `990 + n * (5733 ±0)` - // Minimum execution time: 2_671_000 picoseconds. - Weight::from_parts(2_814_000, 990) - // Standard Error: 785_159 - .saturating_add(Weight::from_parts(109_659_566, 0).saturating_mul(s.into())) - // Standard Error: 130_628 - .saturating_add(Weight::from_parts(19_169_269, 0).saturating_mul(n.into())) + // Minimum execution time: 6_250_000 picoseconds. + Weight::from_parts(6_366_000, 990) + // Standard Error: 4_608_791 + .saturating_add(Weight::from_parts(290_267_031, 0).saturating_mul(s.into())) + // Standard Error: 766_770 + .saturating_add(Weight::from_parts(50_932_487, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5733).saturating_mul(n.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// The range of component `s` is `[0, 99]`. + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `474 + s * (36 ±0)` + // Measured: `437 + s * (36 ±0)` // Estimated: `11003` - // Minimum execution time: 12_571_000 picoseconds. 
- Weight::from_parts(16_366_301, 11003) - // Standard Error: 217 - .saturating_add(Weight::from_parts(42_542, 0).saturating_mul(s.into())) + // Minimum execution time: 26_954_000 picoseconds. + Weight::from_parts(31_240_173, 11003) + // Standard Error: 547 + .saturating_add(Weight::from_parts(52_287, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `590 + s * (3 ±0)` // Estimated: `11003` - // Minimum execution time: 7_278_000 picoseconds. - Weight::from_parts(9_227_799, 11003) - // Standard Error: 104 - .saturating_add(Weight::from_parts(14_014, 0).saturating_mul(s.into())) + // Minimum execution time: 11_715_000 picoseconds. + Weight::from_parts(13_599_708, 11003) + // Standard Error: 236 + .saturating_add(Weight::from_parts(16_093, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `637 + s * (35 ±0)` // Estimated: `11003` - // Minimum execution time: 15_771_000 picoseconds. - Weight::from_parts(18_105_475, 11003) - // Standard Error: 129 - .saturating_add(Weight::from_parts(32_074, 0).saturating_mul(s.into())) + // Minimum execution time: 30_270_000 picoseconds. 
+ Weight::from_parts(32_928_187, 11003) + // Standard Error: 361 + .saturating_add(Weight::from_parts(39_163, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `s` is `[0, 99]`. + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `703 + s * (37 ±0)` + // Measured: `665 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 14_093_000 picoseconds. - Weight::from_parts(16_125_177, 6723) - // Standard Error: 146 - .saturating_add(Weight::from_parts(39_270, 0).saturating_mul(s.into())) + // Minimum execution time: 23_796_000 picoseconds. + Weight::from_parts(26_106_458, 6723) + // Standard Error: 384 + .saturating_add(Weight::from_parts(43_963, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/maintenance/src/weights.rs b/pallets/maintenance/src/weights.rs index 82fd8286ea..ed1d78fa0e 100644 --- a/pallets/maintenance/src/weights.rs +++ b/pallets/maintenance/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_maintenance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/maintenance/src/weights.rs @@ -40,48 +40,48 @@ pub trait WeightInfo { /// Weights for pallet_maintenance using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
- /// Storage: Maintenance Enabled (r:0 w:1)
- /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+ /// Storage: `Maintenance::Enabled` (r:0 w:1)
+ /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
fn enable() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 3_015_000 picoseconds.
- Weight::from_parts(3_184_000, 0)
+ // Minimum execution time: 7_175_000 picoseconds.
+ Weight::from_parts(7_380_000, 0)
.saturating_add(T::DbWeight::get().writes(1_u64))
}
- /// Storage: Maintenance Enabled (r:0 w:1)
- /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+ /// Storage: `Maintenance::Enabled` (r:0 w:1)
+ /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
fn disable() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_976_000 picoseconds.
- Weight::from_parts(3_111_000, 0)
+ // Minimum execution time: 7_043_000 picoseconds.
+ Weight::from_parts(7_234_000, 0)
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
- /// Storage: Maintenance Enabled (r:0 w:1)
- /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+ /// Storage: `Maintenance::Enabled` (r:0 w:1)
+ /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
fn enable() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 3_015_000 picoseconds.
- Weight::from_parts(3_184_000, 0)
+ // Minimum execution time: 7_175_000 picoseconds.
+ Weight::from_parts(7_380_000, 0)
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
- /// Storage: Maintenance Enabled (r:0 w:1)
- /// Proof: Maintenance Enabled (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
+ /// Storage: `Maintenance::Enabled` (r:0 w:1)
+ /// Proof: `Maintenance::Enabled` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
fn disable() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_976_000 picoseconds.
- Weight::from_parts(3_111_000, 0)
+ // Minimum execution time: 7_043_000 picoseconds.
+ Weight::from_parts(7_234_000, 0)
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
}
diff --git a/pallets/nonfungible/src/weights.rs b/pallets/nonfungible/src/weights.rs
index 82802a2b33..e87381e828 100644
--- a/pallets/nonfungible/src/weights.rs
+++ b/pallets/nonfungible/src/weights.rs
@@ -5,7 +5,7 @@
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor`
+//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`
//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024
// Executed Command:
@@ -65,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Proof Size summary in bytes:
// Measured: `142`
// Estimated: `3530`
- // Minimum execution time: 15_410_000 picoseconds.
- Weight::from_parts(15_850_000, 3530) + // Minimum execution time: 25_209_000 picoseconds. + Weight::from_parts(25_648_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -83,10 +83,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_300_000 picoseconds. - Weight::from_parts(5_992_994, 3530) - // Standard Error: 4_478 - .saturating_add(Weight::from_parts(8_002_092, 0).saturating_mul(b.into())) + // Minimum execution time: 6_239_000 picoseconds. + Weight::from_parts(11_021_733, 3530) + // Standard Error: 3_013 + .saturating_add(Weight::from_parts(9_580_947, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -104,10 +104,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_300_000 picoseconds. - Weight::from_parts(3_980_000, 3481) - // Standard Error: 1_382 - .saturating_add(Weight::from_parts(11_259_286, 0).saturating_mul(b.into())) + // Minimum execution time: 6_278_000 picoseconds. + Weight::from_parts(5_169_950, 3481) + // Standard Error: 3_419 + .saturating_add(Weight::from_parts(13_514_569, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -132,8 +132,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 26_360_000 picoseconds. - Weight::from_parts(26_850_000, 3530) + // Minimum execution time: 39_015_000 picoseconds. + Weight::from_parts(39_562_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -149,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 22_710_000 picoseconds. - Weight::from_parts(23_130_000, 6070) + // Minimum execution time: 32_930_000 picoseconds. + Weight::from_parts(33_398_000, 6070) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -162,8 +162,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 11_520_000 picoseconds. - Weight::from_parts(12_030_000, 3522) + // Minimum execution time: 17_411_000 picoseconds. + Weight::from_parts(17_790_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -175,8 +175,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 11_570_000 picoseconds. - Weight::from_parts(12_139_000, 3522) + // Minimum execution time: 17_707_000 picoseconds. + Weight::from_parts(18_035_000, 3522) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_210_000 picoseconds. 
- Weight::from_parts(4_350_000, 3522) + // Minimum execution time: 6_353_000 picoseconds. + Weight::from_parts(6_515_000, 3522) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::Allowance` (r:1 w:1) @@ -208,8 +208,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 32_230_000 picoseconds. - Weight::from_parts(33_210_000, 3530) + // Minimum execution time: 47_086_000 picoseconds. + Weight::from_parts(47_687_000, 3530) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_180_000 picoseconds. - Weight::from_parts(3_370_000, 36269) + // Minimum execution time: 4_868_000 picoseconds. + Weight::from_parts(4_994_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) @@ -230,10 +230,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 440_000 picoseconds. - Weight::from_parts(3_567_990, 0) - // Standard Error: 24_013 - .saturating_add(Weight::from_parts(19_386_123, 0).saturating_mul(b.into())) + // Minimum execution time: 860_000 picoseconds. + Weight::from_parts(886_000, 0) + // Standard Error: 70_909 + .saturating_add(Weight::from_parts(38_734_650, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) @@ -243,10 +243,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_460_000 picoseconds. - Weight::from_parts(1_530_000, 20191) - // Standard Error: 124_929 - .saturating_add(Weight::from_parts(28_397_581, 0).saturating_mul(b.into())) + // Minimum execution time: 3_151_000 picoseconds. + Weight::from_parts(3_276_000, 20191) + // Standard Error: 169_159 + .saturating_add(Weight::from_parts(38_018_122, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(7_160_000, 0) + // Minimum execution time: 11_146_000 picoseconds. + Weight::from_parts(11_344_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Nonfungible::CollectionAllowance` (r:1 w:0) @@ -266,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_630_000 picoseconds. - Weight::from_parts(3_780_000, 3576) + // Minimum execution time: 5_413_000 picoseconds. + Weight::from_parts(5_593_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::TokenProperties` (r:1 w:1) @@ -276,8 +276,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_480_000, 36269) + // Minimum execution time: 4_968_000 picoseconds. 
+ Weight::from_parts(5_138_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -297,8 +297,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 15_410_000 picoseconds. - Weight::from_parts(15_850_000, 3530) + // Minimum execution time: 25_209_000 picoseconds. + Weight::from_parts(25_648_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -315,10 +315,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3530` - // Minimum execution time: 3_300_000 picoseconds. - Weight::from_parts(5_992_994, 3530) - // Standard Error: 4_478 - .saturating_add(Weight::from_parts(8_002_092, 0).saturating_mul(b.into())) + // Minimum execution time: 6_239_000 picoseconds. + Weight::from_parts(11_021_733, 3530) + // Standard Error: 3_013 + .saturating_add(Weight::from_parts(9_580_947, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(b.into()))) @@ -336,10 +336,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_300_000 picoseconds. - Weight::from_parts(3_980_000, 3481) - // Standard Error: 1_382 - .saturating_add(Weight::from_parts(11_259_286, 0).saturating_mul(b.into())) + // Minimum execution time: 6_278_000 picoseconds. + Weight::from_parts(5_169_950, 3481) + // Standard Error: 3_419 + .saturating_add(Weight::from_parts(13_514_569, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -364,8 +364,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `3530` - // Minimum execution time: 26_360_000 picoseconds. - Weight::from_parts(26_850_000, 3530) + // Minimum execution time: 39_015_000 picoseconds. + Weight::from_parts(39_562_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -381,8 +381,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `380` // Estimated: `6070` - // Minimum execution time: 22_710_000 picoseconds. - Weight::from_parts(23_130_000, 6070) + // Minimum execution time: 32_930_000 picoseconds. + Weight::from_parts(33_398_000, 6070) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -394,8 +394,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3522` - // Minimum execution time: 11_520_000 picoseconds. - Weight::from_parts(12_030_000, 3522) + // Minimum execution time: 17_411_000 picoseconds. + Weight::from_parts(17_790_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -407,8 +407,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3522` - // Minimum execution time: 11_570_000 picoseconds. - Weight::from_parts(12_139_000, 3522) + // Minimum execution time: 17_707_000 picoseconds. 
+ Weight::from_parts(18_035_000, 3522) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -418,8 +418,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `362` // Estimated: `3522` - // Minimum execution time: 4_210_000 picoseconds. - Weight::from_parts(4_350_000, 3522) + // Minimum execution time: 6_353_000 picoseconds. + Weight::from_parts(6_515_000, 3522) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::Allowance` (r:1 w:1) @@ -440,8 +440,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3530` - // Minimum execution time: 32_230_000 picoseconds. - Weight::from_parts(33_210_000, 3530) + // Minimum execution time: 47_086_000 picoseconds. + Weight::from_parts(47_687_000, 3530) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -451,8 +451,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_180_000 picoseconds. - Weight::from_parts(3_370_000, 36269) + // Minimum execution time: 4_868_000 picoseconds. + Weight::from_parts(4_994_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::TokenProperties` (r:0 w:1) @@ -462,10 +462,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 440_000 picoseconds. - Weight::from_parts(3_567_990, 0) - // Standard Error: 24_013 - .saturating_add(Weight::from_parts(19_386_123, 0).saturating_mul(b.into())) + // Minimum execution time: 860_000 picoseconds. + Weight::from_parts(886_000, 0) + // Standard Error: 70_909 + .saturating_add(Weight::from_parts(38_734_650, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) @@ -475,10 +475,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_460_000 picoseconds. - Weight::from_parts(1_530_000, 20191) - // Standard Error: 124_929 - .saturating_add(Weight::from_parts(28_397_581, 0).saturating_mul(b.into())) + // Minimum execution time: 3_151_000 picoseconds. + Weight::from_parts(3_276_000, 20191) + // Standard Error: 169_159 + .saturating_add(Weight::from_parts(38_018_122, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -488,8 +488,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(7_160_000, 0) + // Minimum execution time: 11_146_000 picoseconds. + Weight::from_parts(11_344_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Nonfungible::CollectionAllowance` (r:1 w:0) @@ -498,8 +498,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3576` - // Minimum execution time: 3_630_000 picoseconds. - Weight::from_parts(3_780_000, 3576) + // Minimum execution time: 5_413_000 picoseconds. 
+ Weight::from_parts(5_593_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Nonfungible::TokenProperties` (r:1 w:1) @@ -508,8 +508,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `279` // Estimated: `36269` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_480_000, 36269) + // Minimum execution time: 4_968_000 picoseconds. + Weight::from_parts(5_138_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/refungible/src/weights.rs b/pallets/refungible/src/weights.rs index 2806a2ff2b..e2c2140912 100644 --- a/pallets/refungible/src/weights.rs +++ b/pallets/refungible/src/weights.rs @@ -5,7 +5,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `hearthstone`, CPU: `AMD Ryzen 9 7950X3D 16-Core Processor` +//! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: @@ -76,8 +76,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 19_400_000 picoseconds. - Weight::from_parts(19_890_000, 3530) + // Minimum execution time: 29_966_000 picoseconds. + Weight::from_parts(30_393_000, 3530) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -96,10 +96,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 3_120_000 picoseconds. - Weight::from_parts(3_310_000, 3530) - // Standard Error: 2_748 - .saturating_add(Weight::from_parts(11_489_631, 0).saturating_mul(b.into())) + // Minimum execution time: 5_693_000 picoseconds. + Weight::from_parts(109_204, 3530) + // Standard Error: 5_369 + .saturating_add(Weight::from_parts(13_553_747, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -119,10 +119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_180_000 picoseconds. - Weight::from_parts(2_015_490, 3481) - // Standard Error: 6_052 - .saturating_add(Weight::from_parts(14_837_077, 0).saturating_mul(b.into())) + // Minimum execution time: 5_758_000 picoseconds. + Weight::from_parts(7_034_227, 3481) + // Standard Error: 3_531 + .saturating_add(Weight::from_parts(17_131_873, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -144,10 +144,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 5_200_000 picoseconds. - Weight::from_parts(25_301_631, 3481) - // Standard Error: 6_177 - .saturating_add(Weight::from_parts(11_197_931, 0).saturating_mul(b.into())) + // Minimum execution time: 8_857_000 picoseconds. 
+ Weight::from_parts(9_650_186, 3481) + // Standard Error: 3_518 + .saturating_add(Weight::from_parts(13_536_987, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -166,8 +166,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 29_540_000 picoseconds. - Weight::from_parts(30_190_000, 8682) + // Minimum execution time: 44_423_000 picoseconds. + Weight::from_parts(45_092_000, 8682) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -187,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 30_650_000 picoseconds. - Weight::from_parts(31_370_000, 3554) + // Minimum execution time: 44_902_000 picoseconds. + Weight::from_parts(45_473_000, 3554) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -200,8 +200,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 18_530_000 picoseconds. - Weight::from_parts(19_010_000, 6118) + // Minimum execution time: 27_627_000 picoseconds. + Weight::from_parts(28_046_000, 6118) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -217,8 +217,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 24_240_000 picoseconds. - Weight::from_parts(24_760_000, 6118) + // Minimum execution time: 35_866_000 picoseconds. + Weight::from_parts(36_441_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -234,8 +234,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 25_990_000 picoseconds. - Weight::from_parts(26_650_000, 6118) + // Minimum execution time: 38_370_000 picoseconds. + Weight::from_parts(38_855_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -251,8 +251,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 29_550_000 picoseconds. - Weight::from_parts(30_530_000, 6118) + // Minimum execution time: 43_244_000 picoseconds. + Weight::from_parts(43_649_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -264,8 +264,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 11_420_000 picoseconds. - Weight::from_parts(11_810_000, 3554) + // Minimum execution time: 17_775_000 picoseconds. + Weight::from_parts(18_062_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -277,8 +277,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 11_610_000 picoseconds. - Weight::from_parts(11_950_000, 3554) + // Minimum execution time: 18_304_000 picoseconds. 
+ Weight::from_parts(18_534_000, 3554) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -292,8 +292,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 28_510_000 picoseconds. - Weight::from_parts(29_180_000, 6118) + // Minimum execution time: 42_123_000 picoseconds. + Weight::from_parts(42_887_000, 6118) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -311,8 +311,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 34_370_000 picoseconds. - Weight::from_parts(35_270_000, 6118) + // Minimum execution time: 50_549_000 picoseconds. + Weight::from_parts(51_144_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -330,8 +330,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 36_490_000 picoseconds. - Weight::from_parts(37_160_000, 6118) + // Minimum execution time: 52_978_000 picoseconds. + Weight::from_parts(53_931_000, 6118) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -349,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 40_080_000 picoseconds. - Weight::from_parts(48_310_000, 6118) + // Minimum execution time: 57_512_000 picoseconds. + Weight::from_parts(58_148_000, 6118) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -372,8 +372,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 41_100_000 picoseconds. - Weight::from_parts(42_060_000, 3570) + // Minimum execution time: 59_841_000 picoseconds. + Weight::from_parts(60_643_000, 3570) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -383,8 +383,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_520_000 picoseconds. - Weight::from_parts(2_670_000, 36269) + // Minimum execution time: 3_752_000 picoseconds. + Weight::from_parts(3_933_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Refungible::TokenProperties` (r:0 w:1) @@ -394,10 +394,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 490_000 picoseconds. - Weight::from_parts(3_457_547, 0) - // Standard Error: 24_239 - .saturating_add(Weight::from_parts(19_382_722, 0).saturating_mul(b.into())) + // Minimum execution time: 851_000 picoseconds. + Weight::from_parts(879_000, 0) + // Standard Error: 27_977 + .saturating_add(Weight::from_parts(37_787_661, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) @@ -407,10 +407,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_500_000 picoseconds. 
- Weight::from_parts(1_590_000, 20191) - // Standard Error: 123_927 - .saturating_add(Weight::from_parts(27_355_093, 0).saturating_mul(b.into())) + // Minimum execution time: 3_212_000 picoseconds. + Weight::from_parts(3_312_000, 20191) + // Standard Error: 169_099 + .saturating_add(Weight::from_parts(37_467_090, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -422,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 14_340_000 picoseconds. - Weight::from_parts(14_590_000, 3554) + // Minimum execution time: 21_917_000 picoseconds. + Weight::from_parts(22_248_000, 3554) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -433,8 +433,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_390_000 picoseconds. - Weight::from_parts(6_650_000, 0) + // Minimum execution time: 11_167_000 picoseconds. + Weight::from_parts(11_372_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Refungible::CollectionAllowance` (r:1 w:0) @@ -443,8 +443,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 3_060_000 picoseconds. - Weight::from_parts(3_210_000, 3576) + // Minimum execution time: 4_851_000 picoseconds. + Weight::from_parts(4_988_000, 3576) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Refungible::TokenProperties` (r:1 w:1) @@ -453,8 +453,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_620_000, 36269) + // Minimum execution time: 4_130_000 picoseconds. + Weight::from_parts(4_267_000, 36269) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -476,8 +476,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 19_400_000 picoseconds. - Weight::from_parts(19_890_000, 3530) + // Minimum execution time: 29_966_000 picoseconds. + Weight::from_parts(30_393_000, 3530) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -496,10 +496,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3530` - // Minimum execution time: 3_120_000 picoseconds. - Weight::from_parts(3_310_000, 3530) - // Standard Error: 2_748 - .saturating_add(Weight::from_parts(11_489_631, 0).saturating_mul(b.into())) + // Minimum execution time: 5_693_000 picoseconds. + Weight::from_parts(109_204, 3530) + // Standard Error: 5_369 + .saturating_add(Weight::from_parts(13_553_747, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) @@ -519,10 +519,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 3_180_000 picoseconds. 
- Weight::from_parts(2_015_490, 3481) - // Standard Error: 6_052 - .saturating_add(Weight::from_parts(14_837_077, 0).saturating_mul(b.into())) + // Minimum execution time: 5_758_000 picoseconds. + Weight::from_parts(7_034_227, 3481) + // Standard Error: 3_531 + .saturating_add(Weight::from_parts(17_131_873, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -544,10 +544,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3481 + b * (2540 ±0)` - // Minimum execution time: 5_200_000 picoseconds. - Weight::from_parts(25_301_631, 3481) - // Standard Error: 6_177 - .saturating_add(Weight::from_parts(11_197_931, 0).saturating_mul(b.into())) + // Minimum execution time: 8_857_000 picoseconds. + Weight::from_parts(9_650_186, 3481) + // Standard Error: 3_518 + .saturating_add(Weight::from_parts(13_536_987, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -566,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `8682` - // Minimum execution time: 29_540_000 picoseconds. - Weight::from_parts(30_190_000, 8682) + // Minimum execution time: 44_423_000 picoseconds. + Weight::from_parts(45_092_000, 8682) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -587,8 +587,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `3554` - // Minimum execution time: 30_650_000 picoseconds. - Weight::from_parts(31_370_000, 3554) + // Minimum execution time: 44_902_000 picoseconds. + Weight::from_parts(45_473_000, 3554) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -600,8 +600,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `365` // Estimated: `6118` - // Minimum execution time: 18_530_000 picoseconds. - Weight::from_parts(19_010_000, 6118) + // Minimum execution time: 27_627_000 picoseconds. + Weight::from_parts(28_046_000, 6118) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -617,8 +617,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 24_240_000 picoseconds. - Weight::from_parts(24_760_000, 6118) + // Minimum execution time: 35_866_000 picoseconds. + Weight::from_parts(36_441_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -634,8 +634,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `456` // Estimated: `6118` - // Minimum execution time: 25_990_000 picoseconds. - Weight::from_parts(26_650_000, 6118) + // Minimum execution time: 38_370_000 picoseconds. + Weight::from_parts(38_855_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -651,8 +651,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `341` // Estimated: `6118` - // Minimum execution time: 29_550_000 picoseconds. - Weight::from_parts(30_530_000, 6118) + // Minimum execution time: 43_244_000 picoseconds. 
+ Weight::from_parts(43_649_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -664,8 +664,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3554` - // Minimum execution time: 11_420_000 picoseconds. - Weight::from_parts(11_810_000, 3554) + // Minimum execution time: 17_775_000 picoseconds. + Weight::from_parts(18_062_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -677,8 +677,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211` // Estimated: `3554` - // Minimum execution time: 11_610_000 picoseconds. - Weight::from_parts(11_950_000, 3554) + // Minimum execution time: 18_304_000 picoseconds. + Weight::from_parts(18_534_000, 3554) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -692,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `6118` - // Minimum execution time: 28_510_000 picoseconds. - Weight::from_parts(29_180_000, 6118) + // Minimum execution time: 42_123_000 picoseconds. + Weight::from_parts(42_887_000, 6118) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -711,8 +711,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 34_370_000 picoseconds. - Weight::from_parts(35_270_000, 6118) + // Minimum execution time: 50_549_000 picoseconds. + Weight::from_parts(51_144_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -730,8 +730,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `586` // Estimated: `6118` - // Minimum execution time: 36_490_000 picoseconds. - Weight::from_parts(37_160_000, 6118) + // Minimum execution time: 52_978_000 picoseconds. + Weight::from_parts(53_931_000, 6118) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -749,8 +749,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `6118` - // Minimum execution time: 40_080_000 picoseconds. - Weight::from_parts(48_310_000, 6118) + // Minimum execution time: 57_512_000 picoseconds. + Weight::from_parts(58_148_000, 6118) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -772,8 +772,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `471` // Estimated: `3570` - // Minimum execution time: 41_100_000 picoseconds. - Weight::from_parts(42_060_000, 3570) + // Minimum execution time: 59_841_000 picoseconds. + Weight::from_parts(60_643_000, 3570) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -783,8 +783,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_520_000 picoseconds. - Weight::from_parts(2_670_000, 36269) + // Minimum execution time: 3_752_000 picoseconds. 
+ Weight::from_parts(3_933_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Refungible::TokenProperties` (r:0 w:1) @@ -794,10 +794,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 490_000 picoseconds. - Weight::from_parts(3_457_547, 0) - // Standard Error: 24_239 - .saturating_add(Weight::from_parts(19_382_722, 0).saturating_mul(b.into())) + // Minimum execution time: 851_000 picoseconds. + Weight::from_parts(879_000, 0) + // Standard Error: 27_977 + .saturating_add(Weight::from_parts(37_787_661, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Common::CollectionPropertyPermissions` (r:1 w:1) @@ -807,10 +807,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314` // Estimated: `20191` - // Minimum execution time: 1_500_000 picoseconds. - Weight::from_parts(1_590_000, 20191) - // Standard Error: 123_927 - .saturating_add(Weight::from_parts(27_355_093, 0).saturating_mul(b.into())) + // Minimum execution time: 3_212_000 picoseconds. + Weight::from_parts(3_312_000, 20191) + // Standard Error: 169_099 + .saturating_add(Weight::from_parts(37_467_090, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -822,8 +822,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `288` // Estimated: `3554` - // Minimum execution time: 14_340_000 picoseconds. - Weight::from_parts(14_590_000, 3554) + // Minimum execution time: 21_917_000 picoseconds. + Weight::from_parts(22_248_000, 3554) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -833,8 +833,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_390_000 picoseconds. - Weight::from_parts(6_650_000, 0) + // Minimum execution time: 11_167_000 picoseconds. + Weight::from_parts(11_372_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Refungible::CollectionAllowance` (r:1 w:0) @@ -843,8 +843,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3576` - // Minimum execution time: 3_060_000 picoseconds. - Weight::from_parts(3_210_000, 3576) + // Minimum execution time: 4_851_000 picoseconds. + Weight::from_parts(4_988_000, 3576) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Refungible::TokenProperties` (r:1 w:1) @@ -853,8 +853,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `120` // Estimated: `36269` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_620_000, 36269) + // Minimum execution time: 4_130_000 picoseconds. + Weight::from_parts(4_267_000, 36269) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/structure/src/weights.rs b/pallets/structure/src/weights.rs index 11fe06285f..a520b29c23 100644 --- a/pallets/structure/src/weights.rs +++ b/pallets/structure/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_structure //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! 
HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/structure/src/weights.rs @@ -39,32 +39,32 @@ pub trait WeightInfo { /// Weights for pallet_structure using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn find_parent() -> Weight { // Proof Size summary in bytes: // Measured: `667` // Estimated: `4325` - // Minimum execution time: 7_344_000 picoseconds. - Weight::from_parts(7_578_000, 4325) + // Minimum execution time: 10_392_000 picoseconds. + Weight::from_parts(10_615_000, 4325) .saturating_add(T::DbWeight::get().reads(2_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) fn find_parent() -> Weight { // Proof Size summary in bytes: // Measured: `667` // Estimated: `4325` - // Minimum execution time: 7_344_000 picoseconds. - Weight::from_parts(7_578_000, 4325) + // Minimum execution time: 10_392_000 picoseconds. + Weight::from_parts(10_615_000, 4325) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/pallets/unique/src/weights.rs b/pallets/unique/src/weights.rs index 7eb0553f02..ff7bf29258 100644 --- a/pallets/unique/src/weights.rs +++ b/pallets/unique/src/weights.rs @@ -3,13 +3,13 @@ //! Autogenerated weights for pallet_unique //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: `400`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: `80`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `bench-host`, CPU: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -20,7 +20,7 @@ // * // --template=.maintain/frame-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./pallets/unique/src/weights.rs @@ -51,182 +51,182 @@ pub trait WeightInfo { /// Weights for pallet_unique using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Common CreatedCollectionCount (r:1 w:1) - /// Proof: Common CreatedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:0) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:0 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:0 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CreatedCollectionCount` (r:1 w:1) + /// Proof: `Common::CreatedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:0) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:0 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:0 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn create_collection() -> Weight { // Proof Size summary in bytes: // Measured: `245` // Estimated: `6196` - // Minimum execution time: 26_618_000 picoseconds. - Weight::from_parts(27_287_000, 6196) + // Minimum execution time: 59_642_000 picoseconds. 
+ Weight::from_parts(60_286_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:1) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensMinted (r:0 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:0 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:1) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensMinted` (r:0 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:0 w:1) + /// Proof: `Nonfungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) fn destroy_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1200` // Estimated: `4325` - // Minimum execution time: 37_428_000 picoseconds. - Weight::from_parts(38_258_000, 4325) + // Minimum execution time: 63_083_000 picoseconds. 
+ Weight::from_parts(63_928_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common Allowlist (r:0 w:1) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::Allowlist` (r:0 w:1) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn add_to_allow_list() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_968_000 picoseconds. - Weight::from_parts(10_388_000, 4325) + // Minimum execution time: 18_654_000 picoseconds. + Weight::from_parts(18_932_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common Allowlist (r:0 w:1) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::Allowlist` (r:0 w:1) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn remove_from_allow_list() -> Weight { // Proof Size summary in bytes: // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 9_600_000 picoseconds. - Weight::from_parts(9_974_000, 4325) + // Minimum execution time: 18_361_000 picoseconds. + Weight::from_parts(18_584_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn change_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_185_000 picoseconds. - Weight::from_parts(9_525_000, 4325) + // Minimum execution time: 17_058_000 picoseconds. 
+ Weight::from_parts(17_362_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common IsAdmin (r:1 w:1) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:1 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::IsAdmin` (r:1 w:1) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:1 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) fn add_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1012` // Estimated: `4325` - // Minimum execution time: 12_704_000 picoseconds. - Weight::from_parts(13_115_000, 4325) + // Minimum execution time: 24_063_000 picoseconds. + Weight::from_parts(24_374_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common IsAdmin (r:1 w:1) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:1 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::IsAdmin` (r:1 w:1) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:1 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) fn remove_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1107` // Estimated: `4325` - // Minimum execution time: 14_185_000 picoseconds. - Weight::from_parts(14_492_000, 4325) + // Minimum execution time: 25_196_000 picoseconds. + Weight::from_parts(25_670_000, 4325) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_collection_sponsor() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_217_000 picoseconds. - Weight::from_parts(9_499_000, 4325) + // Minimum execution time: 17_058_000 picoseconds. 
+ Weight::from_parts(17_388_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn confirm_sponsorship() -> Weight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 8_993_000 picoseconds. - Weight::from_parts(9_264_000, 4325) + // Minimum execution time: 16_730_000 picoseconds. + Weight::from_parts(16_965_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn remove_collection_sponsor() -> Weight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 8_804_000 picoseconds. - Weight::from_parts(9_302_000, 4325) + // Minimum execution time: 16_504_000 picoseconds. + Weight::from_parts(16_836_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_transfers_enabled_flag() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 5_985_000 picoseconds. - Weight::from_parts(6_155_000, 4325) + // Minimum execution time: 10_285_000 picoseconds. + Weight::from_parts(10_505_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_collection_limits() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_288_000 picoseconds. - Weight::from_parts(9_608_000, 4325) + // Minimum execution time: 17_160_000 picoseconds. + Weight::from_parts(17_427_000, 4325) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionProperties` (r:1 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) fn force_repair_collection() -> Weight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_904_000 picoseconds. 
- Weight::from_parts(5_142_000, 44457) + // Minimum execution time: 7_771_000 picoseconds. + Weight::from_parts(8_037_000, 44457) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -234,182 +234,182 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: Common CreatedCollectionCount (r:1 w:1) - /// Proof: Common CreatedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:0) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionPropertyPermissions (r:0 w:1) - /// Proof: Common CollectionPropertyPermissions (max_values: None, max_size: Some(16726), added: 19201, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) - /// Storage: Common CollectionById (r:0 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CreatedCollectionCount` (r:1 w:1) + /// Proof: `Common::CreatedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:0) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionPropertyPermissions` (r:0 w:1) + /// Proof: `Common::CollectionPropertyPermissions` (`max_values`: None, `max_size`: Some(16726), added: 19201, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionById` (r:0 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn create_collection() -> Weight { // Proof Size summary in bytes: // Measured: `245` // Estimated: `6196` - // Minimum execution time: 26_618_000 picoseconds. - Weight::from_parts(27_287_000, 6196) + // Minimum execution time: 59_642_000 picoseconds. 
+ Weight::from_parts(60_286_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Nonfungible TokenData (r:1 w:0) - /// Proof: Nonfungible TokenData (max_values: None, max_size: Some(57), added: 2532, mode: MaxEncodedLen) - /// Storage: Common DestroyedCollectionCount (r:1 w:1) - /// Proof: Common DestroyedCollectionCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensMinted (r:0 w:1) - /// Proof: Nonfungible TokensMinted (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Nonfungible TokensBurnt (r:0 w:1) - /// Proof: Nonfungible TokensBurnt (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:0 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: Common CollectionProperties (r:0 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokenData` (r:1 w:0) + /// Proof: `Nonfungible::TokenData` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Common::DestroyedCollectionCount` (r:1 w:1) + /// Proof: `Common::DestroyedCollectionCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensMinted` (r:0 w:1) + /// Proof: `Nonfungible::TokensMinted` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Nonfungible::TokensBurnt` (r:0 w:1) + /// Proof: `Nonfungible::TokensBurnt` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:0 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Common::CollectionProperties` (r:0 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) fn destroy_collection() -> Weight { // Proof Size summary in bytes: // Measured: `1200` // Estimated: `4325` - // Minimum execution time: 37_428_000 picoseconds. - Weight::from_parts(38_258_000, 4325) + // Minimum execution time: 63_083_000 picoseconds. 
+ Weight::from_parts(63_928_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common Allowlist (r:0 w:1) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::Allowlist` (r:0 w:1) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn add_to_allow_list() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_968_000 picoseconds. - Weight::from_parts(10_388_000, 4325) + // Minimum execution time: 18_654_000 picoseconds. + Weight::from_parts(18_932_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common Allowlist (r:0 w:1) - /// Proof: Common Allowlist (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::Allowlist` (r:0 w:1) + /// Proof: `Common::Allowlist` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) fn remove_from_allow_list() -> Weight { // Proof Size summary in bytes: // Measured: `1033` // Estimated: `4325` - // Minimum execution time: 9_600_000 picoseconds. - Weight::from_parts(9_974_000, 4325) + // Minimum execution time: 18_361_000 picoseconds. + Weight::from_parts(18_584_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn change_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_185_000 picoseconds. - Weight::from_parts(9_525_000, 4325) + // Minimum execution time: 17_058_000 picoseconds. 
+ Weight::from_parts(17_362_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common IsAdmin (r:1 w:1) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:1 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::IsAdmin` (r:1 w:1) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:1 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) fn add_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1012` // Estimated: `4325` - // Minimum execution time: 12_704_000 picoseconds. - Weight::from_parts(13_115_000, 4325) + // Minimum execution time: 24_063_000 picoseconds. + Weight::from_parts(24_374_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Common CollectionById (r:1 w:0) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) - /// Storage: Common IsAdmin (r:1 w:1) - /// Proof: Common IsAdmin (max_values: None, max_size: Some(70), added: 2545, mode: MaxEncodedLen) - /// Storage: Common AdminAmount (r:1 w:1) - /// Proof: Common AdminAmount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:0) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) + /// Storage: `Common::IsAdmin` (r:1 w:1) + /// Proof: `Common::IsAdmin` (`max_values`: None, `max_size`: Some(70), added: 2545, mode: `MaxEncodedLen`) + /// Storage: `Common::AdminAmount` (r:1 w:1) + /// Proof: `Common::AdminAmount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) fn remove_collection_admin() -> Weight { // Proof Size summary in bytes: // Measured: `1107` // Estimated: `4325` - // Minimum execution time: 14_185_000 picoseconds. - Weight::from_parts(14_492_000, 4325) + // Minimum execution time: 25_196_000 picoseconds. + Weight::from_parts(25_670_000, 4325) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_collection_sponsor() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_217_000 picoseconds. - Weight::from_parts(9_499_000, 4325) + // Minimum execution time: 17_058_000 picoseconds. 
+ Weight::from_parts(17_388_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn confirm_sponsorship() -> Weight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 8_993_000 picoseconds. - Weight::from_parts(9_264_000, 4325) + // Minimum execution time: 16_730_000 picoseconds. + Weight::from_parts(16_965_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn remove_collection_sponsor() -> Weight { // Proof Size summary in bytes: // Measured: `1032` // Estimated: `4325` - // Minimum execution time: 8_804_000 picoseconds. - Weight::from_parts(9_302_000, 4325) + // Minimum execution time: 16_504_000 picoseconds. + Weight::from_parts(16_836_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_transfers_enabled_flag() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 5_985_000 picoseconds. - Weight::from_parts(6_155_000, 4325) + // Minimum execution time: 10_285_000 picoseconds. + Weight::from_parts(10_505_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionById (r:1 w:1) - /// Proof: Common CollectionById (max_values: None, max_size: Some(860), added: 3335, mode: MaxEncodedLen) + /// Storage: `Common::CollectionById` (r:1 w:1) + /// Proof: `Common::CollectionById` (`max_values`: None, `max_size`: Some(860), added: 3335, mode: `MaxEncodedLen`) fn set_collection_limits() -> Weight { // Proof Size summary in bytes: // Measured: `1000` // Estimated: `4325` - // Minimum execution time: 9_288_000 picoseconds. - Weight::from_parts(9_608_000, 4325) + // Minimum execution time: 17_160_000 picoseconds. 
+ Weight::from_parts(17_427_000, 4325) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Common CollectionProperties (r:1 w:1) - /// Proof: Common CollectionProperties (max_values: None, max_size: Some(40992), added: 43467, mode: MaxEncodedLen) + /// Storage: `Common::CollectionProperties` (r:1 w:1) + /// Proof: `Common::CollectionProperties` (`max_values`: None, `max_size`: Some(40992), added: 43467, mode: `MaxEncodedLen`) fn force_repair_collection() -> Weight { // Proof Size summary in bytes: // Measured: `298` // Estimated: `44457` - // Minimum execution time: 4_904_000 picoseconds. - Weight::from_parts(5_142_000, 44457) + // Minimum execution time: 7_771_000 picoseconds. + Weight::from_parts(8_037_000, 44457) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/primitives/common/src/constants.rs b/primitives/common/src/constants.rs index df7ae1db60..e1e6f88cb3 100644 --- a/primitives/common/src/constants.rs +++ b/primitives/common/src/constants.rs @@ -57,10 +57,10 @@ pub const MAX_COLLATORS: u32 = 10; pub const SESSION_LENGTH: BlockNumber = HOURS; // Targeting 0.1 UNQ per transfer -pub const WEIGHT_TO_FEE_COEFF: u64 = /**/77_334_604_063_436_322/**/; +pub const WEIGHT_TO_FEE_COEFF: u64 = /**/74_401_761_267_585_092/**/; // Targeting 0.15 UNQ per transfer via ETH -pub const MIN_GAS_PRICE: u64 = /**/1_920_639_188_722/**/; +pub const MIN_GAS_PRICE: u64 = /**/1_873_477_799_288/**/; /// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. /// This is used to limit the maximal weight of a single extrinsic. diff --git a/runtime/common/weights/xcm.rs b/runtime/common/weights/xcm.rs index a9ad8b1ca7..7e9690f71b 100644 --- a/runtime/common/weights/xcm.rs +++ b/runtime/common/weights/xcm.rs @@ -3,12 +3,12 @@ //! Autogenerated weights for pallet_xcm //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-26, STEPS: `50`, REPEAT: 400, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-10-13, STEPS: `50`, REPEAT: 80, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 // Executed Command: -// target/production/unique-collator +// ./target/production/unique-collator // benchmark // pallet // --pallet @@ -19,7 +19,7 @@ // * // --template=.maintain/external-weight-template.hbs // --steps=50 -// --repeat=400 +// --repeat=80 // --heap-pages=4096 // --output=./runtime/common/weights/xcm.rs @@ -35,222 +35,222 @@ use sp_std::marker::PhantomData; /// Weights for pallet_xcm using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl pallet_xcm::WeightInfo for SubstrateWeight { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3743` - // Minimum execution time: 12_999_000 picoseconds. - Weight::from_parts(13_426_000, 3743) + // Minimum execution time: 27_564_000 picoseconds. + Weight::from_parts(28_000_000, 3743) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_299_000 picoseconds. - Weight::from_parts(10_647_000, 1489) + // Minimum execution time: 26_239_000 picoseconds. + Weight::from_parts(26_609_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_094_000 picoseconds. - Weight::from_parts(10_464_000, 1489) + // Minimum execution time: 26_032_000 picoseconds. 
+ Weight::from_parts(26_417_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_485_000 picoseconds. - Weight::from_parts(3_664_000, 0) + // Minimum execution time: 9_322_000 picoseconds. + Weight::from_parts(9_573_000, 0) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_717_000 picoseconds. - Weight::from_parts(3_866_000, 0) + // Minimum execution time: 9_125_000 picoseconds. + Weight::from_parts(9_367_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_328_000 picoseconds. - Weight::from_parts(1_400_000, 0) + // Minimum execution time: 2_923_000 picoseconds. + Weight::from_parts(3_053_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) 
+ /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `278` // Estimated: `3743` - // Minimum execution time: 16_057_000 picoseconds. - Weight::from_parts(16_483_000, 3743) + // Minimum execution time: 31_942_000 picoseconds. + Weight::from_parts(32_568_000, 3743) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `3926` - // Minimum execution time: 18_009_000 picoseconds. 
- Weight::from_parts(18_565_000, 3926) + // Minimum execution time: 32_521_000 picoseconds. + Weight::from_parts(32_922_000, 3926) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_378_000 picoseconds. - Weight::from_parts(1_447_000, 0) + // Minimum execution time: 3_026_000 picoseconds. + Weight::from_parts(3_126_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `196` // Estimated: `11086` - // Minimum execution time: 10_770_000 picoseconds. - Weight::from_parts(11_090_000, 11086) + // Minimum execution time: 16_157_000 picoseconds. + Weight::from_parts(16_556_000, 11086) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `200` // Estimated: `11090` - // Minimum execution time: 10_760_000 picoseconds. - Weight::from_parts(11_091_000, 11090) + // Minimum execution time: 16_300_000 picoseconds. + Weight::from_parts(16_774_000, 11090) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `207` // Estimated: `13572` - // Minimum execution time: 12_026_000 picoseconds. - Weight::from_parts(12_321_000, 13572) + // Minimum execution time: 16_948_000 picoseconds. 
+ Weight::from_parts(17_328_000, 13572) .saturating_add(T::DbWeight::get().reads(5_u64)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `345` // Estimated: `6285` - // Minimum execution time: 15_508_000 picoseconds. - Weight::from_parts(15_885_000, 6285) + // Minimum execution time: 29_235_000 picoseconds. + Weight::from_parts(29_652_000, 6285) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `8654` - // Minimum execution time: 5_580_000 picoseconds. - Weight::from_parts(5_753_000, 8654) + // Minimum execution time: 8_453_000 picoseconds. 
+ Weight::from_parts(8_742_000, 8654) .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `207` // Estimated: `11097` - // Minimum execution time: 10_951_000 picoseconds. - Weight::from_parts(11_341_000, 11097) + // Minimum execution time: 16_522_000 picoseconds. + Weight::from_parts(16_863_000, 11097) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `11239` - // Minimum execution time: 19_990_000 picoseconds. - Weight::from_parts(20_433_000, 11239) + // Minimum execution time: 35_386_000 picoseconds. 
+		Weight::from_parts(35_854_000, 11239)
 			.saturating_add(T::DbWeight::get().reads(9_u64))
 			.saturating_add(T::DbWeight::get().writes(4_u64))
 	}

From 8af05a8ac34d4c3f849ced88be081dcd294d6200 Mon Sep 17 00:00:00 2001
From: Yaroslav Bolyukin
Date: Tue, 17 Oct 2023 12:08:39 +0200
Subject: [PATCH 142/143] build: switch to released ORML

---
 Cargo.lock | 12 ++++++------
 Cargo.toml | 10 +++++-----
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 145f688950..d25d9b0364 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6613,7 +6613,7 @@ dependencies = [
 [[package]]
 name = "orml-tokens"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "frame-support",
  "frame-system",
@@ -6630,7 +6630,7 @@ dependencies = [
 [[package]]
 name = "orml-traits"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "frame-support",
  "impl-trait-for-tuples",
@@ -6650,7 +6650,7 @@ dependencies = [
 [[package]]
 name = "orml-utilities"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "frame-support",
  "parity-scale-codec",
@@ -6665,7 +6665,7 @@ dependencies = [
 [[package]]
 name = "orml-vesting"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "frame-support",
  "frame-system",
@@ -6680,7 +6680,7 @@ dependencies = [
 [[package]]
 name = "orml-xcm-support"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "frame-support",
  "orml-traits",
@@ -6694,7 +6694,7 @@ dependencies = [
 [[package]]
 name = "orml-xtokens"
 version = "0.4.1-dev"
-source = "git+https://github.com/moonbeam-foundation/open-runtime-module-library?branch=upgrade-to-polkadot-v1.1.0#26b6fd59ab71429604a81c99ac093d20982c5459"
+source = "git+https://github.com/open-web3-stack/open-runtime-module-library?branch=polkadot-v1.1.0#b3694e631df7f1ca16b1973122937753fcdee9d4"
 dependencies = [
  "cumulus-primitives-core",
  "frame-support",
diff --git a/Cargo.toml b/Cargo.toml
index 748f2ada65..38da75fe7a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -198,11 +198,11 @@ frame-try-runtime = { default-features = false, git = "https://github.com/parity
 try-runtime-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }

 # ORML
-orml-tokens = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" }
-orml-traits = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" }
-orml-vesting = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" }
-orml-xcm-support = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" }
-orml-xtokens = { default-features = false, git = "https://github.com/moonbeam-foundation/open-runtime-module-library", branch = "upgrade-to-polkadot-v1.1.0" }
+orml-tokens = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v1.1.0" }
+orml-traits = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v1.1.0" }
+orml-vesting = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v1.1.0" }
+orml-xcm-support = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v1.1.0" }
+orml-xtokens = { default-features = false, git = "https://github.com/open-web3-stack/open-runtime-module-library", branch = "polkadot-v1.1.0" }

 # Other
 derivative = { version = "2.2.0", features = ["use_core"] }

From 092e9b8f020dffef1294c9a2c56bb74a40c83fc0 Mon Sep 17 00:00:00 2001
From: Daniel Shiposha
Date: Tue, 17 Oct 2023 18:39:42 +0200
Subject: [PATCH 143/143] feat: propose-upgrade governance script (#1019)

* feat: governance scripts
* fix: move propose-upgrade script to tests
---
 tests/package.json | 5 +++--
 tests/src/proposeupgrade.ts | 39 +++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 2 deletions(-)
 create mode 100644 tests/src/proposeupgrade.ts

diff --git a/tests/package.json b/tests/package.json
index 01ca1ad5e8..c669f54d63 100644
--- a/tests/package.json
+++ b/tests/package.json
@@ -130,7 +130,8 @@
     "polkadot-types-from-defs": "ts-node --esm ./node_modules/.bin/polkadot-types-from-defs --endpoint src/interfaces/metadata.json --input src/interfaces/ --package .",
     "polkadot-types-from-chain": "ts-node --esm ./node_modules/.bin/polkadot-types-from-chain --endpoint src/interfaces/metadata.json --output src/interfaces/ --package .",
     "polkadot-types": "echo \"export default {}\" > src/interfaces/lookup.ts && yarn polkadot-types-fetch-metadata && yarn polkadot-types-from-defs && yarn polkadot-types-from-defs && yarn polkadot-types-from-chain",
-    "generateEnv": "ts-node --esm ./src/generateEnv.ts"
+    "generateEnv": "ts-node --esm ./src/generateEnv.ts",
+    "propose-upgrade": "ts-node --esm ./src/proposeupgrade.ts"
   },
   "author": "",
   "license": "SEE LICENSE IN ../LICENSE",
@@ -159,4 +160,4 @@
   },
   "type": "module",
   "packageManager": "yarn@3.6.1"
-}
\ No newline at end of file
+}
diff --git a/tests/src/proposeupgrade.ts b/tests/src/proposeupgrade.ts
new file mode 100644
index 0000000000..a008f7ec40
--- /dev/null
+++ b/tests/src/proposeupgrade.ts
@@ -0,0 +1,39 @@
+import {ApiPromise, WsProvider} from '@polkadot/api';
+import {blake2AsHex} from '@polkadot/util-crypto';
+import {readFileSync} from 'fs';
+
+async function main() {
+  const networkUrl = process.argv[2];
+  const wasmFile = process.argv[3];
+
+  const wsProvider = new WsProvider(networkUrl);
+  const api = await ApiPromise.create({provider: wsProvider});
+
+  const wasmFileBytes = readFileSync(wasmFile);
+  const wasmFileHash = blake2AsHex(wasmFileBytes, 256);
+
+  const authorizeUpgrade = api.tx.parachainSystem.authorizeUpgrade(wasmFileHash, true);
+
+  const councilMembers = (await api.query.council.members()).toJSON() as any[];
+  const councilProposalThreshold = Math.floor(councilMembers.length / 2) + 1;
+
+  const democracyProposal = api.tx.democracy.externalProposeDefault({
+    Inline: authorizeUpgrade.method.toHex(),
+  });
+
+  const councilProposal = api.tx.council.propose(
+    councilProposalThreshold,
+    democracyProposal,
+    democracyProposal.method.encodedLength,
+  );
+
+  const encodedCall = councilProposal.method.toHex();
+
+  console.log('-----------------');
+  console.log('Upgrade Proposal: ', `https://polkadot.js.org/apps/?rpc=${networkUrl}#/extrinsics/decode/${encodedCall}`);
+  console.log('-----------------');
+
+  await api.disconnect();
+}
+
+await main();
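
With the package.json entry above, the script is invoked as `yarn propose-upgrade <ws-url> <path-to-runtime.wasm>`, matching the two positional arguments it reads from `process.argv`. It only builds and prints the hex-encoded `council.propose` call (plus a polkadot.js Apps decode link); a council member still has to sign that call, and the rest of the council has to approve the resulting motion. Below is a minimal TypeScript sketch of that approval step. It is not part of the patch: the node URL, the `COUNCIL_SEED` variable, and the assumption that the newest open motion is the upgrade proposal are all illustrative placeholders.

import {ApiPromise, WsProvider, Keyring} from '@polkadot/api';

// Placeholder node URL; point this at the chain the proposal was submitted to.
const api = await ApiPromise.create({provider: new WsProvider(process.env.NODE_URL ?? 'ws://127.0.0.1:9944')});

// Placeholder council-member key; COUNCIL_SEED is not a repo convention.
const member = new Keyring({type: 'sr25519'}).addFromUri(process.env.COUNCIL_SEED ?? '//Alice');

// Hashes of the currently open council motions; assume the newest one is the upgrade proposal.
const hashes = (await api.query.council.proposals()).toJSON() as string[];
const proposalHash = hashes[hashes.length - 1];
const motion = (await api.query.council.voting(proposalHash)).toJSON() as any;

// pallet_collective::vote(proposal_hash, index, approve)
await api.tx.council.vote(proposalHash, motion.index, true).signAndSend(member);

await api.disconnect();

Once the motion is approved and closed, the wrapped `democracy.externalProposeDefault` becomes an external proposal that is later tabled as a referendum; if that referendum passes, `parachainSystem.authorizeUpgrade(hash, true)` is dispatched, and the wasm whose blake2-256 hash was authorized is supplied in a separate step (typically `parachainSystem.enactAuthorizedUpgrade`).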